net/sctp/output.c
v5.4
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/* SCTP kernel implementation
  3 * (C) Copyright IBM Corp. 2001, 2004
  4 * Copyright (c) 1999-2000 Cisco, Inc.
  5 * Copyright (c) 1999-2001 Motorola, Inc.
  6 *
  7 * This file is part of the SCTP kernel implementation
  8 *
  9 * These functions handle output processing.
 10 *
 11 * Please send any bug reports or fixes you make to the
 12 * email address(es):
 13 *    lksctp developers <linux-sctp@vger.kernel.org>
 14 *
 15 * Written or modified by:
 16 *    La Monte H.P. Yarroll <piggy@acm.org>
 17 *    Karl Knutson          <karl@athena.chicago.il.us>
 18 *    Jon Grimm             <jgrimm@austin.ibm.com>
 19 *    Sridhar Samudrala     <sri@us.ibm.com>
 20 */
 21
 22#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 23
 24#include <linux/types.h>
 25#include <linux/kernel.h>
 26#include <linux/wait.h>
 27#include <linux/time.h>
 28#include <linux/ip.h>
 29#include <linux/ipv6.h>
 30#include <linux/init.h>
 31#include <linux/slab.h>
 32#include <net/inet_ecn.h>
 33#include <net/ip.h>
 34#include <net/icmp.h>
 35#include <net/net_namespace.h>
 36
 37#include <linux/socket.h> /* for sa_family_t */
 38#include <net/sock.h>
 39
 40#include <net/sctp/sctp.h>
 41#include <net/sctp/sm.h>
 42#include <net/sctp/checksum.h>
 43
 44/* Forward declarations for private helpers. */
 45static enum sctp_xmit __sctp_packet_append_chunk(struct sctp_packet *packet,
 46						 struct sctp_chunk *chunk);
 47static enum sctp_xmit sctp_packet_can_append_data(struct sctp_packet *packet,
 48						  struct sctp_chunk *chunk);
 49static void sctp_packet_append_data(struct sctp_packet *packet,
 50				    struct sctp_chunk *chunk);
 51static enum sctp_xmit sctp_packet_will_fit(struct sctp_packet *packet,
 52					   struct sctp_chunk *chunk,
 53					   u16 chunk_len);
 54
 55static void sctp_packet_reset(struct sctp_packet *packet)
 56{
 57	/* sctp_packet_transmit() relies on this to reset size to the
 58	 * current overhead after sending packets.
 59	 */
 60	packet->size = packet->overhead;
 61
 62	packet->has_cookie_echo = 0;
 63	packet->has_sack = 0;
 64	packet->has_data = 0;
 65	packet->has_auth = 0;
 66	packet->ipfragok = 0;
 67	packet->auth = NULL;
 68}
 69
  70/* Configure a packet.
  71 * This is a follow-up set of initializations, applied once per flush.
 72 */
 73void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
 74			int ecn_capable)
 75{
 76	struct sctp_transport *tp = packet->transport;
 77	struct sctp_association *asoc = tp->asoc;
 78	struct sctp_sock *sp = NULL;
 79	struct sock *sk;
 80
 81	pr_debug("%s: packet:%p vtag:0x%x\n", __func__, packet, vtag);
 82	packet->vtag = vtag;
 83
 84	/* do the following jobs only once for a flush schedule */
 85	if (!sctp_packet_empty(packet))
 86		return;
 87
 88	/* set packet max_size with pathmtu, then calculate overhead */
 89	packet->max_size = tp->pathmtu;
 90
 91	if (asoc) {
 92		sk = asoc->base.sk;
 93		sp = sctp_sk(sk);
 94	}
 95	packet->overhead = sctp_mtu_payload(sp, 0, 0);
 96	packet->size = packet->overhead;
 97
 98	if (!asoc)
 99		return;
100
101	/* update the dst or transport pathmtu if needed */
102	if (!sctp_transport_dst_check(tp)) {
103		sctp_transport_route(tp, NULL, sp);
104		if (asoc->param_flags & SPP_PMTUD_ENABLE)
105			sctp_assoc_sync_pmtu(asoc);
106	} else if (!sctp_transport_pmtu_check(tp)) {
107		if (asoc->param_flags & SPP_PMTUD_ENABLE)
108			sctp_assoc_sync_pmtu(asoc);
109	}
110
111	if (asoc->pmtu_pending) {
112		if (asoc->param_flags & SPP_PMTUD_ENABLE)
113			sctp_assoc_sync_pmtu(asoc);
114		asoc->pmtu_pending = 0;
115	}
116
117	/* If there is a prepend chunk, stick it on the list before
118	 * any other chunks get appended.
119	 */
120	if (ecn_capable) {
121		struct sctp_chunk *chunk = sctp_get_ecne_prepend(asoc);
122
123		if (chunk)
124			sctp_packet_append_chunk(packet, chunk);
125	}
126
127	if (!tp->dst)
128		return;
129
130	/* set packet max_size with gso_max_size if gso is enabled */
131	rcu_read_lock();
132	if (__sk_dst_get(sk) != tp->dst) {
133		dst_hold(tp->dst);
134		sk_setup_caps(sk, tp->dst);
135	}
136	packet->max_size = sk_can_gso(sk) ? tp->dst->dev->gso_max_size
137					  : asoc->pathmtu;
138	rcu_read_unlock();
139}
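/* Rough sizing example for the logic above (illustrative numbers, IPv4,
 * no extra options): sctp_mtu_payload(sp, 0, 0) returns just the header
 * overhead, 20 (iphdr) + 12 (sctphdr) = 32 bytes, so the empty packet
 * starts at size == overhead == 32 and a 1500-byte pathmtu leaves 1468
 * bytes for chunks.  With GSO, max_size is raised to the device's
 * gso_max_size so several MTU-sized SCTP packets can be handed down as
 * a single large skb.
 */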
140
141/* Initialize the packet structure. */
142void sctp_packet_init(struct sctp_packet *packet,
143		      struct sctp_transport *transport,
144		      __u16 sport, __u16 dport)
145{
146	pr_debug("%s: packet:%p transport:%p\n", __func__, packet, transport);
147
148	packet->transport = transport;
149	packet->source_port = sport;
150	packet->destination_port = dport;
151	INIT_LIST_HEAD(&packet->chunk_list);
152	/* The overhead will be calculated by sctp_packet_config() */
153	packet->overhead = 0;
154	sctp_packet_reset(packet);
155	packet->vtag = 0;
156}
157
158/* Free a packet.  */
159void sctp_packet_free(struct sctp_packet *packet)
160{
161	struct sctp_chunk *chunk, *tmp;
162
163	pr_debug("%s: packet:%p\n", __func__, packet);
164
165	list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
166		list_del_init(&chunk->list);
167		sctp_chunk_free(chunk);
168	}
169}
170
171/* This routine tries to append the chunk to the offered packet. If adding
172 * the chunk causes the packet to exceed the path MTU and a COOKIE_ECHO chunk
173 * is not present in the packet, it transmits the input packet.
174 * Data can be bundled with a packet containing a COOKIE_ECHO chunk as long
175 * as it can fit in the packet, but any more data that does not fit in this
176 * packet can be sent only after receiving the COOKIE_ACK.
177 */
178enum sctp_xmit sctp_packet_transmit_chunk(struct sctp_packet *packet,
179					  struct sctp_chunk *chunk,
180					  int one_packet, gfp_t gfp)
181{
182	enum sctp_xmit retval;
183
184	pr_debug("%s: packet:%p size:%zu chunk:%p size:%d\n", __func__,
185		 packet, packet->size, chunk, chunk->skb ? chunk->skb->len : -1);
186
187	switch ((retval = (sctp_packet_append_chunk(packet, chunk)))) {
188	case SCTP_XMIT_PMTU_FULL:
189		if (!packet->has_cookie_echo) {
190			int error = 0;
191
192			error = sctp_packet_transmit(packet, gfp);
193			if (error < 0)
194				chunk->skb->sk->sk_err = -error;
195
196			/* If we have an empty packet, then we can NOT ever
197			 * return PMTU_FULL.
198			 */
199			if (!one_packet)
200				retval = sctp_packet_append_chunk(packet,
201								  chunk);
202		}
203		break;
204
205	case SCTP_XMIT_RWND_FULL:
206	case SCTP_XMIT_OK:
207	case SCTP_XMIT_DELAY:
208		break;
209	}
210
211	return retval;
212}
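/* Usage note (informal; see sctp_outq_flush() for the real caller):
 * the outqueue keeps feeding chunks to the same packet until a status
 * other than SCTP_XMIT_OK comes back.  With one_packet == 0 a
 * PMTU_FULL result is largely absorbed here: the full packet is
 * transmitted and the chunk is re-appended to the freshly reset
 * packet, so the caller usually observes SCTP_XMIT_OK.
 */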
213
214/* Try to bundle an auth chunk into the packet. */
215static enum sctp_xmit sctp_packet_bundle_auth(struct sctp_packet *pkt,
216					      struct sctp_chunk *chunk)
217{
218	struct sctp_association *asoc = pkt->transport->asoc;
219	enum sctp_xmit retval = SCTP_XMIT_OK;
220	struct sctp_chunk *auth;
221
222	/* if we don't have an association, we can't do authentication */
223	if (!asoc)
224		return retval;
225
226	/* See if this is an auth chunk we are bundling or if
227	 * auth is already bundled.
228	 */
229	if (chunk->chunk_hdr->type == SCTP_CID_AUTH || pkt->has_auth)
230		return retval;
231
232	/* if the peer did not request this chunk to be authenticated,
233	 * don't do it
234	 */
235	if (!chunk->auth)
236		return retval;
237
238	auth = sctp_make_auth(asoc, chunk->shkey->key_id);
239	if (!auth)
240		return retval;
241
242	auth->shkey = chunk->shkey;
243	sctp_auth_shkey_hold(auth->shkey);
244
245	retval = __sctp_packet_append_chunk(pkt, auth);
246
247	if (retval != SCTP_XMIT_OK)
248		sctp_chunk_free(auth);
249
250	return retval;
251}
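/* Ordering note (RFC 4895): an AUTH chunk must precede the chunks it
 * authenticates, which is why it is appended here first, before the
 * caller appends the chunk that triggered the bundling.
 */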
252
253/* Try to bundle a SACK with the packet. */
254static enum sctp_xmit sctp_packet_bundle_sack(struct sctp_packet *pkt,
255					      struct sctp_chunk *chunk)
256{
257	enum sctp_xmit retval = SCTP_XMIT_OK;
258
259	/* If sending DATA and we haven't already bundled a SACK, try to
260	 * bundle one into the packet.
261	 */
262	if (sctp_chunk_is_data(chunk) && !pkt->has_sack &&
263	    !pkt->has_cookie_echo) {
264		struct sctp_association *asoc;
265		struct timer_list *timer;
266		asoc = pkt->transport->asoc;
267		timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
268
269		/* If the SACK timer is running, we have a pending SACK */
270		if (timer_pending(timer)) {
271			struct sctp_chunk *sack;
272
273			if (pkt->transport->sack_generation !=
274			    pkt->transport->asoc->peer.sack_generation)
275				return retval;
276
277			asoc->a_rwnd = asoc->rwnd;
278			sack = sctp_make_sack(asoc);
279			if (sack) {
280				retval = __sctp_packet_append_chunk(pkt, sack);
281				if (retval != SCTP_XMIT_OK) {
282					sctp_chunk_free(sack);
283					goto out;
284				}
285				SCTP_INC_STATS(sock_net(asoc->base.sk),
286					       SCTP_MIB_OUTCTRLCHUNKS);
287				asoc->stats.octrlchunks++;
288				asoc->peer.sack_needed = 0;
289				if (del_timer(timer))
290					sctp_association_put(asoc);
291			}
292		}
293	}
294out:
295	return retval;
296}
297
298
299/* Append a chunk to the offered packet reporting back any inability to do
300 * so.
301 */
302static enum sctp_xmit __sctp_packet_append_chunk(struct sctp_packet *packet,
303						 struct sctp_chunk *chunk)
304{
305	__u16 chunk_len = SCTP_PAD4(ntohs(chunk->chunk_hdr->length));
306	enum sctp_xmit retval = SCTP_XMIT_OK;
307
308	/* Check to see if this chunk will fit into the packet */
309	retval = sctp_packet_will_fit(packet, chunk, chunk_len);
310	if (retval != SCTP_XMIT_OK)
311		goto finish;
312
313	/* We believe that this chunk is OK to add to the packet */
314	switch (chunk->chunk_hdr->type) {
315	case SCTP_CID_DATA:
316	case SCTP_CID_I_DATA:
317		/* Account for the data being in the packet */
318		sctp_packet_append_data(packet, chunk);
319		/* Disallow SACK bundling after DATA. */
320		packet->has_sack = 1;
321		/* Disallow AUTH bundling after DATA */
322		packet->has_auth = 1;
323		/* Let it be known that the packet has DATA in it */
324		packet->has_data = 1;
325		/* timestamp the chunk for rtx purposes */
326		chunk->sent_at = jiffies;
327		/* Mainly used for prsctp RTX policy */
328		chunk->sent_count++;
329		break;
330	case SCTP_CID_COOKIE_ECHO:
331		packet->has_cookie_echo = 1;
332		break;
333
334	case SCTP_CID_SACK:
335		packet->has_sack = 1;
336		if (chunk->asoc)
337			chunk->asoc->stats.osacks++;
338		break;
339
340	case SCTP_CID_AUTH:
341		packet->has_auth = 1;
342		packet->auth = chunk;
343		break;
344	}
345
346	/* It is OK to send this chunk.  */
347	list_add_tail(&chunk->list, &packet->chunk_list);
348	packet->size += chunk_len;
349	chunk->transport = packet->transport;
350finish:
351	return retval;
352}
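/* Chunk lengths are padded to a multiple of 4 bytes on the wire
 * (RFC 4960 3.2), hence SCTP_PAD4() above: e.g. a chunk whose header
 * reports 13 bytes still consumes SCTP_PAD4(13) == 16 bytes of
 * packet->size.
 */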
353
354/* Append a chunk to the offered packet reporting back any inability to do
355 * so.
356 */
357enum sctp_xmit sctp_packet_append_chunk(struct sctp_packet *packet,
358					struct sctp_chunk *chunk)
359{
360	enum sctp_xmit retval = SCTP_XMIT_OK;
361
362	pr_debug("%s: packet:%p chunk:%p\n", __func__, packet, chunk);
363
364	/* Data chunks are special.  Before seeing what else we can
365	 * bundle into this packet, check to see if we are allowed to
366	 * send this DATA.
367	 */
368	if (sctp_chunk_is_data(chunk)) {
369		retval = sctp_packet_can_append_data(packet, chunk);
370		if (retval != SCTP_XMIT_OK)
371			goto finish;
372	}
373
374	/* Try to bundle AUTH chunk */
375	retval = sctp_packet_bundle_auth(packet, chunk);
376	if (retval != SCTP_XMIT_OK)
377		goto finish;
378
379	/* Try to bundle SACK chunk */
380	retval = sctp_packet_bundle_sack(packet, chunk);
381	if (retval != SCTP_XMIT_OK)
382		goto finish;
383
384	retval = __sctp_packet_append_chunk(packet, chunk);
385
386finish:
387	return retval;
388}
389
390static void sctp_packet_gso_append(struct sk_buff *head, struct sk_buff *skb)
391{
392	if (SCTP_OUTPUT_CB(head)->last == head)
393		skb_shinfo(head)->frag_list = skb;
394	else
395		SCTP_OUTPUT_CB(head)->last->next = skb;
396	SCTP_OUTPUT_CB(head)->last = skb;
397
398	head->truesize += skb->truesize;
399	head->data_len += skb->len;
400	head->len += skb->len;
401	refcount_add(skb->truesize, &head->sk->sk_wmem_alloc);
402
403	__skb_header_release(skb);
404}
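/* GSO layout: the head skb keeps only the SCTP common header in its
 * linear area; each complete sub-packet is chained on
 * skb_shinfo(head)->frag_list while len/data_len/truesize are
 * aggregated on the head, matching the GSO_BY_FRAGS segmentation mode
 * set in sctp_packet_pack().
 */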
405
406static int sctp_packet_pack(struct sctp_packet *packet,
407			    struct sk_buff *head, int gso, gfp_t gfp)
408{
409	struct sctp_transport *tp = packet->transport;
410	struct sctp_auth_chunk *auth = NULL;
411	struct sctp_chunk *chunk, *tmp;
412	int pkt_count = 0, pkt_size;
413	struct sock *sk = head->sk;
414	struct sk_buff *nskb;
415	int auth_len = 0;
416
417	if (gso) {
418		skb_shinfo(head)->gso_type = sk->sk_gso_type;
419		SCTP_OUTPUT_CB(head)->last = head;
420	} else {
421		nskb = head;
422		pkt_size = packet->size;
423		goto merge;
424	}
425
426	do {
427		/* calculate the pkt_size and alloc nskb */
428		pkt_size = packet->overhead;
429		list_for_each_entry_safe(chunk, tmp, &packet->chunk_list,
430					 list) {
431			int padded = SCTP_PAD4(chunk->skb->len);
432
433			if (chunk == packet->auth)
434				auth_len = padded;
435			else if (auth_len + padded + packet->overhead >
436				 tp->pathmtu)
437				return 0;
438			else if (pkt_size + padded > tp->pathmtu)
439				break;
440			pkt_size += padded;
441		}
442		nskb = alloc_skb(pkt_size + MAX_HEADER, gfp);
443		if (!nskb)
444			return 0;
445		skb_reserve(nskb, packet->overhead + MAX_HEADER);
446
447merge:
448		/* merge chunks into nskb and append nskb into head list */
449		pkt_size -= packet->overhead;
450		list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
451			int padding;
452
453			list_del_init(&chunk->list);
454			if (sctp_chunk_is_data(chunk)) {
455				if (!sctp_chunk_retransmitted(chunk) &&
456				    !tp->rto_pending) {
457					chunk->rtt_in_progress = 1;
458					tp->rto_pending = 1;
459				}
460			}
461
462			padding = SCTP_PAD4(chunk->skb->len) - chunk->skb->len;
463			if (padding)
464				skb_put_zero(chunk->skb, padding);
465
466			if (chunk == packet->auth)
467				auth = (struct sctp_auth_chunk *)
468							skb_tail_pointer(nskb);
469
470			skb_put_data(nskb, chunk->skb->data, chunk->skb->len);
471
472			pr_debug("*** Chunk:%p[%s] %s 0x%x, length:%d, chunk->skb->len:%d, rtt_in_progress:%d\n",
473				 chunk,
474				 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)),
475				 chunk->has_tsn ? "TSN" : "No TSN",
476				 chunk->has_tsn ? ntohl(chunk->subh.data_hdr->tsn) : 0,
477				 ntohs(chunk->chunk_hdr->length), chunk->skb->len,
478				 chunk->rtt_in_progress);
479
480			pkt_size -= SCTP_PAD4(chunk->skb->len);
481
482			if (!sctp_chunk_is_data(chunk) && chunk != packet->auth)
483				sctp_chunk_free(chunk);
484
485			if (!pkt_size)
486				break;
487		}
488
489		if (auth) {
490			sctp_auth_calculate_hmac(tp->asoc, nskb, auth,
491						 packet->auth->shkey, gfp);
492			/* free auth if no more chunks, or add it back */
493			if (list_empty(&packet->chunk_list))
494				sctp_chunk_free(packet->auth);
495			else
496				list_add(&packet->auth->list,
497					 &packet->chunk_list);
498		}
499
500		if (gso)
501			sctp_packet_gso_append(head, nskb);
502
503		pkt_count++;
504	} while (!list_empty(&packet->chunk_list));
505
506	if (gso) {
507		memset(head->cb, 0, max(sizeof(struct inet_skb_parm),
508					sizeof(struct inet6_skb_parm)));
509		skb_shinfo(head)->gso_segs = pkt_count;
510		skb_shinfo(head)->gso_size = GSO_BY_FRAGS;
511		rcu_read_lock();
512		if (skb_dst(head) != tp->dst) {
513			dst_hold(tp->dst);
514			sk_setup_caps(sk, tp->dst);
515		}
516		rcu_read_unlock();
517		goto chksum;
518	}
519
520	if (sctp_checksum_disable)
521		return 1;
522
523	if (!(skb_dst(head)->dev->features & NETIF_F_SCTP_CRC) ||
524	    dst_xfrm(skb_dst(head)) || packet->ipfragok) {
525		struct sctphdr *sh =
526			(struct sctphdr *)skb_transport_header(head);
527
528		sh->checksum = sctp_compute_cksum(head, 0);
529	} else {
530chksum:
531		head->ip_summed = CHECKSUM_PARTIAL;
532		head->csum_not_inet = 1;
533		head->csum_start = skb_transport_header(head) - head->head;
534		head->csum_offset = offsetof(struct sctphdr, checksum);
535	}
536
537	return pkt_count;
538}
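/* Checksum policy in the non-GSO path above: the CRC32c is computed in
 * software only if the device lacks NETIF_F_SCTP_CRC offload, the
 * packet will pass through an xfrm transform, or IP fragmentation is
 * allowed; otherwise the skb is left CHECKSUM_PARTIAL for the NIC to
 * finish.
 */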
539
540/* All packets are sent to the network through this function from
541 * sctp_outq_tail().
542 *
543 * The return value is always 0 for now.
544 */
545int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
546{
547	struct sctp_transport *tp = packet->transport;
548	struct sctp_association *asoc = tp->asoc;
549	struct sctp_chunk *chunk, *tmp;
550	int pkt_count, gso = 0;
551	struct dst_entry *dst;
552	struct sk_buff *head;
553	struct sctphdr *sh;
554	struct sock *sk;
555
556	pr_debug("%s: packet:%p\n", __func__, packet);
557	if (list_empty(&packet->chunk_list))
558		return 0;
559	chunk = list_entry(packet->chunk_list.next, struct sctp_chunk, list);
560	sk = chunk->skb->sk;
561
562	/* check gso */
563	if (packet->size > tp->pathmtu && !packet->ipfragok) {
564		if (!sk_can_gso(sk)) {
565			pr_err_once("Trying to GSO but underlying device doesn't support it.");
566			goto out;
567		}
568		gso = 1;
569	}
570
571	/* alloc head skb */
572	head = alloc_skb((gso ? packet->overhead : packet->size) +
573			 MAX_HEADER, gfp);
574	if (!head)
575		goto out;
576	skb_reserve(head, packet->overhead + MAX_HEADER);
577	skb_set_owner_w(head, sk);
578
579	/* set sctp header */
580	sh = skb_push(head, sizeof(struct sctphdr));
581	skb_reset_transport_header(head);
582	sh->source = htons(packet->source_port);
583	sh->dest = htons(packet->destination_port);
584	sh->vtag = htonl(packet->vtag);
585	sh->checksum = 0;
586
587	/* drop packet if no dst */
588	dst = dst_clone(tp->dst);
589	if (!dst) {
590		IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
591		kfree_skb(head);
592		goto out;
593	}
594	skb_dst_set(head, dst);
595
596	/* pack up chunks */
597	pkt_count = sctp_packet_pack(packet, head, gso, gfp);
598	if (!pkt_count) {
599		kfree_skb(head);
600		goto out;
601	}
602	pr_debug("***sctp_transmit_packet*** skb->len:%d\n", head->len);
603
604	/* start autoclose timer */
605	if (packet->has_data && sctp_state(asoc, ESTABLISHED) &&
606	    asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) {
607		struct timer_list *timer =
608			&asoc->timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE];
609		unsigned long timeout =
610			asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE];
611
612		if (!mod_timer(timer, jiffies + timeout))
613			sctp_association_hold(asoc);
614	}
615
616	/* sctp xmit */
617	tp->af_specific->ecn_capable(sk);
618	if (asoc) {
619		asoc->stats.opackets += pkt_count;
620		if (asoc->peer.last_sent_to != tp)
621			asoc->peer.last_sent_to = tp;
622	}
623	head->ignore_df = packet->ipfragok;
624	if (tp->dst_pending_confirm)
625		skb_set_dst_pending_confirm(head, 1);
626	/* neighbour should be confirmed on successful transmission or
627	 * positive error
628	 */
629	if (tp->af_specific->sctp_xmit(head, tp) >= 0 &&
630	    tp->dst_pending_confirm)
631		tp->dst_pending_confirm = 0;
632
633out:
634	list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
635		list_del_init(&chunk->list);
636		if (!sctp_chunk_is_data(chunk))
637			sctp_chunk_free(chunk);
638	}
639	sctp_packet_reset(packet);
640	return 0;
641}
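/* Note: the out: cleanup above frees only control chunks; DATA chunks
 * stay on the outqueue's transmitted lists so they can be
 * retransmitted, and are freed once the peer SACKs them.
 */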
642
643/********************************************************************
644 * 2nd Level Abstractions
645 ********************************************************************/
646
647/* This private function checks whether a chunk can be added */
648static enum sctp_xmit sctp_packet_can_append_data(struct sctp_packet *packet,
649						  struct sctp_chunk *chunk)
650{
651	size_t datasize, rwnd, inflight, flight_size;
652	struct sctp_transport *transport = packet->transport;
653	struct sctp_association *asoc = transport->asoc;
654	struct sctp_outq *q = &asoc->outqueue;
655
656	/* RFC 2960 6.1  Transmission of DATA Chunks
657	 *
658	 * A) At any given time, the data sender MUST NOT transmit new data to
659	 * any destination transport address if its peer's rwnd indicates
660	 * that the peer has no buffer space (i.e. rwnd is 0, see Section
661	 * 6.2.1).  However, regardless of the value of rwnd (including if it
662	 * is 0), the data sender can always have one DATA chunk in flight to
663	 * the receiver if allowed by cwnd (see rule B below).  This rule
664	 * allows the sender to probe for a change in rwnd that the sender
665	 * missed due to the SACK having been lost in transit from the data
666	 * receiver to the data sender.
667	 */
668
669	rwnd = asoc->peer.rwnd;
670	inflight = q->outstanding_bytes;
671	flight_size = transport->flight_size;
672
673	datasize = sctp_data_size(chunk);
674
675	if (datasize > rwnd && inflight > 0)
676		/* We have (at least) one data chunk in flight,
677		 * so we can't fall back to rule 6.1 B).
678		 */
679		return SCTP_XMIT_RWND_FULL;
680
681	/* RFC 2960 6.1  Transmission of DATA Chunks
682	 *
683	 * B) At any given time, the sender MUST NOT transmit new data
684	 * to a given transport address if it has cwnd or more bytes
685	 * of data outstanding to that transport address.
686	 */
687	/* RFC 2960 7.2.4 & the Implementers Guide 2.8.
688	 *
689	 * 3) ...
690	 *    When a Fast Retransmit is being performed the sender SHOULD
691	 *    ignore the value of cwnd and SHOULD NOT delay retransmission.
692	 */
693	if (chunk->fast_retransmit != SCTP_NEED_FRTX &&
694	    flight_size >= transport->cwnd)
695		return SCTP_XMIT_RWND_FULL;
696
697	/* Nagle's algorithm to solve small-packet problem:
698	 * Inhibit the sending of new chunks when new outgoing data arrives
699	 * if any previously transmitted data on the connection remains
700	 * unacknowledged.
701	 */
702
703	if ((sctp_sk(asoc->base.sk)->nodelay || inflight == 0) &&
704	    !asoc->force_delay)
705		/* Nothing unacked */
706		return SCTP_XMIT_OK;
707
708	if (!sctp_packet_empty(packet))
709		/* Append to packet */
710		return SCTP_XMIT_OK;
711
712	if (!sctp_state(asoc, ESTABLISHED))
713		return SCTP_XMIT_OK;
714
715	/* Check whether this chunk and all the rest of pending data will fit
716	 * or delay in hopes of bundling a full sized packet.
717	 */
718	if (chunk->skb->len + q->out_qlen > transport->pathmtu -
719	    packet->overhead - sctp_datachk_len(&chunk->asoc->stream) - 4)
720		/* Enough data queued to fill a packet */
721		return SCTP_XMIT_OK;
722
723	/* Don't delay large message writes that may have been fragmented */
724	if (!chunk->msg->can_delay)
725		return SCTP_XMIT_OK;
726
727	/* Defer until all data acked or packet full */
728	return SCTP_XMIT_DELAY;
729}
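/* Nagle summary (informal): a small DATA chunk is delayed
 * (SCTP_XMIT_DELAY) only when delaying applies (data in flight with
 * nodelay off, or force_delay set), the packet is still empty, the
 * association is ESTABLISHED, the queued data cannot yet fill a full
 * packet, and the message permits delay; otherwise it is sent now.
 */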
730
731/* This private function does the bookkeeping when adding a DATA chunk */
732static void sctp_packet_append_data(struct sctp_packet *packet,
733				struct sctp_chunk *chunk)
734{
735	struct sctp_transport *transport = packet->transport;
736	size_t datasize = sctp_data_size(chunk);
737	struct sctp_association *asoc = transport->asoc;
738	u32 rwnd = asoc->peer.rwnd;
739
740	/* Keep track of how many bytes are in flight over this transport. */
741	transport->flight_size += datasize;
742
743	/* Keep track of how many bytes are in flight to the receiver. */
744	asoc->outqueue.outstanding_bytes += datasize;
745
746	/* Update our view of the receiver's rwnd. */
747	if (datasize < rwnd)
748		rwnd -= datasize;
749	else
750		rwnd = 0;
751
752	asoc->peer.rwnd = rwnd;
753	sctp_chunk_assign_tsn(chunk);
754	asoc->stream.si->assign_number(chunk);
755}
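/* Example of the rwnd bookkeeping above: if our view of the peer's
 * rwnd is 1000 bytes and a 1200-byte chunk is sent anyway (the rule
 * 6.1 A) zero-window probe), rwnd is clamped to 0 rather than going
 * negative.
 */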
756
757static enum sctp_xmit sctp_packet_will_fit(struct sctp_packet *packet,
758					   struct sctp_chunk *chunk,
759					   u16 chunk_len)
760{
761	enum sctp_xmit retval = SCTP_XMIT_OK;
762	size_t psize, pmtu, maxsize;
763
764	/* Don't bundle in this packet if this chunk's auth key doesn't
765	 * match other chunks already enqueued on this packet. Also,
766	 * don't bundle the chunk with auth key if other chunks in this
767	 * packet don't have auth key.
768	 */
769	if ((packet->auth && chunk->shkey != packet->auth->shkey) ||
770	    (!packet->auth && chunk->shkey &&
771	     chunk->chunk_hdr->type != SCTP_CID_AUTH))
772		return SCTP_XMIT_PMTU_FULL;
773
774	psize = packet->size;
775	if (packet->transport->asoc)
776		pmtu = packet->transport->asoc->pathmtu;
777	else
778		pmtu = packet->transport->pathmtu;
779
780	/* Decide if we need to fragment or resubmit later. */
781	if (psize + chunk_len > pmtu) {
782		/* It's OK to fragment at IP level if any one of the following
783		 * is true:
784		 *	1. The packet is empty (meaning this chunk is greater
785		 *	   than the MTU)
786		 *	2. The packet doesn't have any data in it yet and data
787		 *	   requires authentication.
788		 */
789		if (sctp_packet_empty(packet) ||
790		    (!packet->has_data && chunk->auth)) {
791			/* We no longer do re-fragmentation.
792			 * Just fragment at the IP layer, if we
793			 * actually hit this condition
794			 */
795			packet->ipfragok = 1;
796			goto out;
797		}
798
799		/* Similarly, if this chunk was built before a PMTU
800		 * reduction, we have to fragment it at IP level now. So
801		 * if the packet already contains something, we need to
802		 * flush.
803		 */
804		maxsize = pmtu - packet->overhead;
805		if (packet->auth)
806			maxsize -= SCTP_PAD4(packet->auth->skb->len);
807		if (chunk_len > maxsize)
808			retval = SCTP_XMIT_PMTU_FULL;
809
810		/* It is also okay to fragment if the chunk we are
811		 * adding is a control chunk, but only if current packet
812		 * is not a GSO one otherwise it causes fragmentation of
813		 * a large frame. So in this case we allow the
814		 * fragmentation by forcing it to be in a new packet.
815		 */
816		if (!sctp_chunk_is_data(chunk) && packet->has_data)
817			retval = SCTP_XMIT_PMTU_FULL;
818
819		if (psize + chunk_len > packet->max_size)
820			/* Hit GSO/PMTU limit, gotta flush */
821			retval = SCTP_XMIT_PMTU_FULL;
822
823		if (!packet->transport->burst_limited &&
824		    psize + chunk_len > (packet->transport->cwnd >> 1))
825			/* Do not allow a single GSO packet to use more
826			 * than half of cwnd.
827			 */
828			retval = SCTP_XMIT_PMTU_FULL;
829
830		if (packet->transport->burst_limited &&
831		    psize + chunk_len > (packet->transport->burst_limited >> 1))
832			/* Do not allow a single GSO packet to use more
833			 * than half of original cwnd.
834			 */
835			retval = SCTP_XMIT_PMTU_FULL;
836		/* Otherwise it will fit in the GSO packet */
837	}
838
839out:
840	return retval;
841}
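/* Example of the GSO caps above (illustrative numbers): with a 64 KB
 * cwnd and no burst limit, a single GSO packet may grow to at most
 * 32 KB (cwnd >> 1) before SCTP_XMIT_PMTU_FULL forces a flush.
 */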
v6.13.7
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/* SCTP kernel implementation
  3 * (C) Copyright IBM Corp. 2001, 2004
  4 * Copyright (c) 1999-2000 Cisco, Inc.
  5 * Copyright (c) 1999-2001 Motorola, Inc.
  6 *
  7 * This file is part of the SCTP kernel implementation
  8 *
  9 * These functions handle output processing.
 10 *
 11 * Please send any bug reports or fixes you make to the
 12 * email address(es):
 13 *    lksctp developers <linux-sctp@vger.kernel.org>
 14 *
 15 * Written or modified by:
 16 *    La Monte H.P. Yarroll <piggy@acm.org>
 17 *    Karl Knutson          <karl@athena.chicago.il.us>
 18 *    Jon Grimm             <jgrimm@austin.ibm.com>
 19 *    Sridhar Samudrala     <sri@us.ibm.com>
 20 */
 21
 22#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 23
 24#include <linux/types.h>
 25#include <linux/kernel.h>
 26#include <linux/wait.h>
 27#include <linux/time.h>
 28#include <linux/ip.h>
 29#include <linux/ipv6.h>
 30#include <linux/init.h>
 31#include <linux/slab.h>
 32#include <net/inet_ecn.h>
 33#include <net/ip.h>
 34#include <net/icmp.h>
 35#include <net/net_namespace.h>
 36
 37#include <linux/socket.h> /* for sa_family_t */
 38#include <net/sock.h>
 39
 40#include <net/sctp/sctp.h>
 41#include <net/sctp/sm.h>
 42#include <net/sctp/checksum.h>
 43
 44/* Forward declarations for private helpers. */
 45static enum sctp_xmit __sctp_packet_append_chunk(struct sctp_packet *packet,
 46						 struct sctp_chunk *chunk);
 47static enum sctp_xmit sctp_packet_can_append_data(struct sctp_packet *packet,
 48						  struct sctp_chunk *chunk);
 49static void sctp_packet_append_data(struct sctp_packet *packet,
 50				    struct sctp_chunk *chunk);
 51static enum sctp_xmit sctp_packet_will_fit(struct sctp_packet *packet,
 52					   struct sctp_chunk *chunk,
 53					   u16 chunk_len);
 54
 55static void sctp_packet_reset(struct sctp_packet *packet)
 56{
 57	/* sctp_packet_transmit() relies on this to reset size to the
 58	 * current overhead after sending packets.
 59	 */
 60	packet->size = packet->overhead;
 61
 62	packet->has_cookie_echo = 0;
 63	packet->has_sack = 0;
 64	packet->has_data = 0;
 65	packet->has_auth = 0;
 66	packet->ipfragok = 0;
 67	packet->auth = NULL;
 68}
 69
  70/* Configure a packet.
  71 * This is a follow-up set of initializations, applied once per flush.
 72 */
 73void sctp_packet_config(struct sctp_packet *packet, __u32 vtag,
 74			int ecn_capable)
 75{
 76	struct sctp_transport *tp = packet->transport;
 77	struct sctp_association *asoc = tp->asoc;
 78	struct sctp_sock *sp = NULL;
 79	struct sock *sk;
 80
 81	pr_debug("%s: packet:%p vtag:0x%x\n", __func__, packet, vtag);
 82	packet->vtag = vtag;
 83
 84	/* do the following jobs only once for a flush schedule */
 85	if (!sctp_packet_empty(packet))
 86		return;
 87
 88	/* set packet max_size with pathmtu, then calculate overhead */
 89	packet->max_size = tp->pathmtu;
 90
 91	if (asoc) {
 92		sk = asoc->base.sk;
 93		sp = sctp_sk(sk);
 94	}
 95	packet->overhead = sctp_mtu_payload(sp, 0, 0);
 96	packet->size = packet->overhead;
 97
 98	if (!asoc)
 99		return;
100
101	/* update the dst or transport pathmtu if needed */
102	if (!sctp_transport_dst_check(tp)) {
103		sctp_transport_route(tp, NULL, sp);
104		if (asoc->param_flags & SPP_PMTUD_ENABLE)
105			sctp_assoc_sync_pmtu(asoc);
106	} else if (!sctp_transport_pl_enabled(tp) &&
107		   asoc->param_flags & SPP_PMTUD_ENABLE) {
108		if (!sctp_transport_pmtu_check(tp))
109			sctp_assoc_sync_pmtu(asoc);
110	}
111
112	if (asoc->pmtu_pending) {
113		if (asoc->param_flags & SPP_PMTUD_ENABLE)
114			sctp_assoc_sync_pmtu(asoc);
115		asoc->pmtu_pending = 0;
116	}
117
118	/* If there is a prepend chunk, stick it on the list before
119	 * any other chunks get appended.
120	 */
121	if (ecn_capable) {
122		struct sctp_chunk *chunk = sctp_get_ecne_prepend(asoc);
123
124		if (chunk)
125			sctp_packet_append_chunk(packet, chunk);
126	}
127
128	if (!tp->dst)
129		return;
130
131	/* set packet max_size with gso_max_size if gso is enabled */
132	rcu_read_lock();
133	if (__sk_dst_get(sk) != tp->dst) {
134		dst_hold(tp->dst);
135		sk_setup_caps(sk, tp->dst);
136	}
137	packet->max_size = sk_can_gso(sk) ? min(READ_ONCE(tp->dst->dev->gso_max_size),
138						GSO_LEGACY_MAX_SIZE)
139					  : asoc->pathmtu;
140	rcu_read_unlock();
141}
142
143/* Initialize the packet structure. */
144void sctp_packet_init(struct sctp_packet *packet,
145		      struct sctp_transport *transport,
146		      __u16 sport, __u16 dport)
147{
148	pr_debug("%s: packet:%p transport:%p\n", __func__, packet, transport);
149
150	packet->transport = transport;
151	packet->source_port = sport;
152	packet->destination_port = dport;
153	INIT_LIST_HEAD(&packet->chunk_list);
154	/* The overhead will be calculated by sctp_packet_config() */
155	packet->overhead = 0;
156	sctp_packet_reset(packet);
157	packet->vtag = 0;
158}
159
160/* Free a packet.  */
161void sctp_packet_free(struct sctp_packet *packet)
162{
163	struct sctp_chunk *chunk, *tmp;
164
165	pr_debug("%s: packet:%p\n", __func__, packet);
166
167	list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
168		list_del_init(&chunk->list);
169		sctp_chunk_free(chunk);
170	}
171}
172
173/* This routine tries to append the chunk to the offered packet. If adding
174 * the chunk causes the packet to exceed the path MTU and a COOKIE_ECHO chunk
175 * is not present in the packet, it transmits the input packet.
176 * Data can be bundled with a packet containing a COOKIE_ECHO chunk as long
177 * as it can fit in the packet, but any more data that does not fit in this
178 * packet can be sent only after receiving the COOKIE_ACK.
179 */
180enum sctp_xmit sctp_packet_transmit_chunk(struct sctp_packet *packet,
181					  struct sctp_chunk *chunk,
182					  int one_packet, gfp_t gfp)
183{
184	enum sctp_xmit retval;
185
186	pr_debug("%s: packet:%p size:%zu chunk:%p size:%d\n", __func__,
187		 packet, packet->size, chunk, chunk->skb ? chunk->skb->len : -1);
188
189	switch ((retval = (sctp_packet_append_chunk(packet, chunk)))) {
190	case SCTP_XMIT_PMTU_FULL:
191		if (!packet->has_cookie_echo) {
192			int error = 0;
193
194			error = sctp_packet_transmit(packet, gfp);
195			if (error < 0)
196				chunk->skb->sk->sk_err = -error;
197
198			/* If we have an empty packet, then we can NOT ever
199			 * return PMTU_FULL.
200			 */
201			if (!one_packet)
202				retval = sctp_packet_append_chunk(packet,
203								  chunk);
204		}
205		break;
206
207	case SCTP_XMIT_RWND_FULL:
208	case SCTP_XMIT_OK:
209	case SCTP_XMIT_DELAY:
210		break;
211	}
212
213	return retval;
214}
215
216/* Try to bundle a pad chunk into a packet with a heartbeat chunk for PLPMTUD probe */
217static enum sctp_xmit sctp_packet_bundle_pad(struct sctp_packet *pkt, struct sctp_chunk *chunk)
218{
219	struct sctp_transport *t = pkt->transport;
220	struct sctp_chunk *pad;
221	int overhead = 0;
222
223	if (!chunk->pmtu_probe)
224		return SCTP_XMIT_OK;
225
226	/* calculate the Padding Data size for the pad chunk */
227	overhead += sizeof(struct sctphdr) + sizeof(struct sctp_chunkhdr);
228	overhead += sizeof(struct sctp_sender_hb_info) + sizeof(struct sctp_pad_chunk);
229	pad = sctp_make_pad(t->asoc, t->pl.probe_size - overhead);
230	if (!pad)
231		return SCTP_XMIT_DELAY;
232
233	list_add_tail(&pad->list, &pkt->chunk_list);
234	pkt->size += SCTP_PAD4(ntohs(pad->chunk_hdr->length));
235	chunk->transport = t;
236
237	return SCTP_XMIT_OK;
238}
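/* PLPMTUD note (RFC 8899): a probe packet is a HEARTBEAT chunk padded
 * with a PAD chunk so that the datagram reaches exactly
 * t->pl.probe_size bytes; the overhead computed above (SCTP header,
 * both chunk headers and the heartbeat info) is subtracted from the
 * probe size to get the Padding Data length.
 */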
239
240/* Try to bundle an auth chunk into the packet. */
241static enum sctp_xmit sctp_packet_bundle_auth(struct sctp_packet *pkt,
242					      struct sctp_chunk *chunk)
243{
244	struct sctp_association *asoc = pkt->transport->asoc;
245	enum sctp_xmit retval = SCTP_XMIT_OK;
246	struct sctp_chunk *auth;
247
248	/* if we don't have an association, we can't do authentication */
249	if (!asoc)
250		return retval;
251
252	/* See if this is an auth chunk we are bundling or if
253	 * auth is already bundled.
254	 */
255	if (chunk->chunk_hdr->type == SCTP_CID_AUTH || pkt->has_auth)
256		return retval;
257
258	/* if the peer did not request this chunk to be authenticated,
259	 * don't do it
260	 */
261	if (!chunk->auth)
262		return retval;
263
264	auth = sctp_make_auth(asoc, chunk->shkey->key_id);
265	if (!auth)
266		return retval;
267
268	auth->shkey = chunk->shkey;
269	sctp_auth_shkey_hold(auth->shkey);
270
271	retval = __sctp_packet_append_chunk(pkt, auth);
272
273	if (retval != SCTP_XMIT_OK)
274		sctp_chunk_free(auth);
275
276	return retval;
277}
278
279/* Try to bundle a SACK with the packet. */
280static enum sctp_xmit sctp_packet_bundle_sack(struct sctp_packet *pkt,
281					      struct sctp_chunk *chunk)
282{
283	enum sctp_xmit retval = SCTP_XMIT_OK;
284
285	/* If sending DATA and we haven't already bundled a SACK, try to
286	 * bundle one into the packet.
287	 */
288	if (sctp_chunk_is_data(chunk) && !pkt->has_sack &&
289	    !pkt->has_cookie_echo) {
290		struct sctp_association *asoc;
291		struct timer_list *timer;
292		asoc = pkt->transport->asoc;
293		timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];
294
295		/* If the SACK timer is running, we have a pending SACK */
296		if (timer_pending(timer)) {
297			struct sctp_chunk *sack;
298
299			if (pkt->transport->sack_generation !=
300			    pkt->transport->asoc->peer.sack_generation)
301				return retval;
302
303			asoc->a_rwnd = asoc->rwnd;
304			sack = sctp_make_sack(asoc);
305			if (sack) {
306				retval = __sctp_packet_append_chunk(pkt, sack);
307				if (retval != SCTP_XMIT_OK) {
308					sctp_chunk_free(sack);
309					goto out;
310				}
311				SCTP_INC_STATS(asoc->base.net,
312					       SCTP_MIB_OUTCTRLCHUNKS);
313				asoc->stats.octrlchunks++;
314				asoc->peer.sack_needed = 0;
315				if (del_timer(timer))
316					sctp_association_put(asoc);
317			}
318		}
319	}
320out:
321	return retval;
322}
323
324
325/* Append a chunk to the offered packet reporting back any inability to do
326 * so.
327 */
328static enum sctp_xmit __sctp_packet_append_chunk(struct sctp_packet *packet,
329						 struct sctp_chunk *chunk)
330{
331	__u16 chunk_len = SCTP_PAD4(ntohs(chunk->chunk_hdr->length));
332	enum sctp_xmit retval = SCTP_XMIT_OK;
333
334	/* Check to see if this chunk will fit into the packet */
335	retval = sctp_packet_will_fit(packet, chunk, chunk_len);
336	if (retval != SCTP_XMIT_OK)
337		goto finish;
338
339	/* We believe that this chunk is OK to add to the packet */
340	switch (chunk->chunk_hdr->type) {
341	case SCTP_CID_DATA:
342	case SCTP_CID_I_DATA:
343		/* Account for the data being in the packet */
344		sctp_packet_append_data(packet, chunk);
345		/* Disallow SACK bundling after DATA. */
346		packet->has_sack = 1;
347		/* Disallow AUTH bundling after DATA */
348		packet->has_auth = 1;
349		/* Let it be known that the packet has DATA in it */
350		packet->has_data = 1;
351		/* timestamp the chunk for rtx purposes */
352		chunk->sent_at = jiffies;
353		/* Mainly used for prsctp RTX policy */
354		chunk->sent_count++;
355		break;
356	case SCTP_CID_COOKIE_ECHO:
357		packet->has_cookie_echo = 1;
358		break;
359
360	case SCTP_CID_SACK:
361		packet->has_sack = 1;
362		if (chunk->asoc)
363			chunk->asoc->stats.osacks++;
364		break;
365
366	case SCTP_CID_AUTH:
367		packet->has_auth = 1;
368		packet->auth = chunk;
369		break;
370	}
371
372	/* It is OK to send this chunk.  */
373	list_add_tail(&chunk->list, &packet->chunk_list);
374	packet->size += chunk_len;
375	chunk->transport = packet->transport;
376finish:
377	return retval;
378}
379
380/* Append a chunk to the offered packet reporting back any inability to do
381 * so.
382 */
383enum sctp_xmit sctp_packet_append_chunk(struct sctp_packet *packet,
384					struct sctp_chunk *chunk)
385{
386	enum sctp_xmit retval = SCTP_XMIT_OK;
387
388	pr_debug("%s: packet:%p chunk:%p\n", __func__, packet, chunk);
389
390	/* Data chunks are special.  Before seeing what else we can
391	 * bundle into this packet, check to see if we are allowed to
392	 * send this DATA.
393	 */
394	if (sctp_chunk_is_data(chunk)) {
395		retval = sctp_packet_can_append_data(packet, chunk);
396		if (retval != SCTP_XMIT_OK)
397			goto finish;
398	}
399
400	/* Try to bundle AUTH chunk */
401	retval = sctp_packet_bundle_auth(packet, chunk);
402	if (retval != SCTP_XMIT_OK)
403		goto finish;
404
405	/* Try to bundle SACK chunk */
406	retval = sctp_packet_bundle_sack(packet, chunk);
407	if (retval != SCTP_XMIT_OK)
408		goto finish;
409
410	retval = __sctp_packet_append_chunk(packet, chunk);
411	if (retval != SCTP_XMIT_OK)
412		goto finish;
413
414	retval = sctp_packet_bundle_pad(packet, chunk);
415
416finish:
417	return retval;
418}
419
420static void sctp_packet_gso_append(struct sk_buff *head, struct sk_buff *skb)
421{
422	if (SCTP_OUTPUT_CB(head)->last == head)
423		skb_shinfo(head)->frag_list = skb;
424	else
425		SCTP_OUTPUT_CB(head)->last->next = skb;
426	SCTP_OUTPUT_CB(head)->last = skb;
427
428	head->truesize += skb->truesize;
429	head->data_len += skb->len;
430	head->len += skb->len;
431	refcount_add(skb->truesize, &head->sk->sk_wmem_alloc);
432
433	__skb_header_release(skb);
434}
435
436static int sctp_packet_pack(struct sctp_packet *packet,
437			    struct sk_buff *head, int gso, gfp_t gfp)
438{
439	struct sctp_transport *tp = packet->transport;
440	struct sctp_auth_chunk *auth = NULL;
441	struct sctp_chunk *chunk, *tmp;
442	int pkt_count = 0, pkt_size;
443	struct sock *sk = head->sk;
444	struct sk_buff *nskb;
445	int auth_len = 0;
446
447	if (gso) {
448		skb_shinfo(head)->gso_type = sk->sk_gso_type;
449		SCTP_OUTPUT_CB(head)->last = head;
450	} else {
451		nskb = head;
452		pkt_size = packet->size;
453		goto merge;
454	}
455
456	do {
457		/* calculate the pkt_size and alloc nskb */
458		pkt_size = packet->overhead;
459		list_for_each_entry_safe(chunk, tmp, &packet->chunk_list,
460					 list) {
461			int padded = SCTP_PAD4(chunk->skb->len);
462
463			if (chunk == packet->auth)
464				auth_len = padded;
465			else if (auth_len + padded + packet->overhead >
466				 tp->pathmtu)
467				return 0;
468			else if (pkt_size + padded > tp->pathmtu)
469				break;
470			pkt_size += padded;
471		}
472		nskb = alloc_skb(pkt_size + MAX_HEADER, gfp);
473		if (!nskb)
474			return 0;
475		skb_reserve(nskb, packet->overhead + MAX_HEADER);
476
477merge:
478		/* merge chunks into nskb and append nskb into head list */
479		pkt_size -= packet->overhead;
480		list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
481			int padding;
482
483			list_del_init(&chunk->list);
484			if (sctp_chunk_is_data(chunk)) {
485				if (!sctp_chunk_retransmitted(chunk) &&
486				    !tp->rto_pending) {
487					chunk->rtt_in_progress = 1;
488					tp->rto_pending = 1;
489				}
490			}
491
492			padding = SCTP_PAD4(chunk->skb->len) - chunk->skb->len;
493			if (padding)
494				skb_put_zero(chunk->skb, padding);
495
496			if (chunk == packet->auth)
497				auth = (struct sctp_auth_chunk *)
498							skb_tail_pointer(nskb);
499
500			skb_put_data(nskb, chunk->skb->data, chunk->skb->len);
501
502			pr_debug("*** Chunk:%p[%s] %s 0x%x, length:%d, chunk->skb->len:%d, rtt_in_progress:%d\n",
503				 chunk,
504				 sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type)),
505				 chunk->has_tsn ? "TSN" : "No TSN",
506				 chunk->has_tsn ? ntohl(chunk->subh.data_hdr->tsn) : 0,
507				 ntohs(chunk->chunk_hdr->length), chunk->skb->len,
508				 chunk->rtt_in_progress);
509
510			pkt_size -= SCTP_PAD4(chunk->skb->len);
511
512			if (!sctp_chunk_is_data(chunk) && chunk != packet->auth)
513				sctp_chunk_free(chunk);
514
515			if (!pkt_size)
516				break;
517		}
518
519		if (auth) {
520			sctp_auth_calculate_hmac(tp->asoc, nskb, auth,
521						 packet->auth->shkey, gfp);
522			/* free auth if no more chunks, or add it back */
523			if (list_empty(&packet->chunk_list))
524				sctp_chunk_free(packet->auth);
525			else
526				list_add(&packet->auth->list,
527					 &packet->chunk_list);
528		}
529
530		if (gso)
531			sctp_packet_gso_append(head, nskb);
532
533		pkt_count++;
534	} while (!list_empty(&packet->chunk_list));
535
536	if (gso) {
537		memset(head->cb, 0, max(sizeof(struct inet_skb_parm),
538					sizeof(struct inet6_skb_parm)));
539		skb_shinfo(head)->gso_segs = pkt_count;
540		skb_shinfo(head)->gso_size = GSO_BY_FRAGS;
541		goto chksum;
542	}
543
544	if (sctp_checksum_disable)
545		return 1;
546
547	if (!(tp->dst->dev->features & NETIF_F_SCTP_CRC) ||
548	    dst_xfrm(tp->dst) || packet->ipfragok || tp->encap_port) {
549		struct sctphdr *sh =
550			(struct sctphdr *)skb_transport_header(head);
551
552		sh->checksum = sctp_compute_cksum(head, 0);
553	} else {
554chksum:
555		head->ip_summed = CHECKSUM_PARTIAL;
556		head->csum_not_inet = 1;
557		head->csum_start = skb_transport_header(head) - head->head;
558		head->csum_offset = offsetof(struct sctphdr, checksum);
559	}
560
561	return pkt_count;
562}
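/* Note: tp->encap_port (SCTP over UDP, RFC 6951) also forces the
 * software CRC32c above, since NETIF_F_SCTP_CRC offload applies only
 * to plain, unencapsulated SCTP packets.
 */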
563
564/* All packets are sent to the network through this function from
565 * sctp_outq_tail().
566 *
567 * The return value is always 0 for now.
568 */
569int sctp_packet_transmit(struct sctp_packet *packet, gfp_t gfp)
570{
571	struct sctp_transport *tp = packet->transport;
572	struct sctp_association *asoc = tp->asoc;
573	struct sctp_chunk *chunk, *tmp;
574	int pkt_count, gso = 0;
575	struct sk_buff *head;
576	struct sctphdr *sh;
577	struct sock *sk;
578
579	pr_debug("%s: packet:%p\n", __func__, packet);
580	if (list_empty(&packet->chunk_list))
581		return 0;
582	chunk = list_entry(packet->chunk_list.next, struct sctp_chunk, list);
583	sk = chunk->skb->sk;
584
585	if (packet->size > tp->pathmtu && !packet->ipfragok && !chunk->pmtu_probe) {
586		if (tp->pl.state == SCTP_PL_ERROR) { /* do IP fragmentation if in Error state */
587			packet->ipfragok = 1;
588		} else {
589			if (!sk_can_gso(sk)) { /* check gso */
590				pr_err_once("Trying to GSO but underlying device doesn't support it.");
591				goto out;
592			}
593			gso = 1;
594		}
595	}
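	/* In the PLPMTUD Error state the probed MTU is not trusted, so an
	 * oversized non-probe packet falls back to IP fragmentation here
	 * instead of relying on GSO.
	 */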
596
597	/* alloc head skb */
598	head = alloc_skb((gso ? packet->overhead : packet->size) +
599			 MAX_HEADER, gfp);
600	if (!head)
601		goto out;
602	skb_reserve(head, packet->overhead + MAX_HEADER);
603	skb_set_owner_w(head, sk);
604
605	/* set sctp header */
606	sh = skb_push(head, sizeof(struct sctphdr));
607	skb_reset_transport_header(head);
608	sh->source = htons(packet->source_port);
609	sh->dest = htons(packet->destination_port);
610	sh->vtag = htonl(packet->vtag);
611	sh->checksum = 0;
612
613	/* drop packet if no dst */
614	if (!tp->dst) {
615		IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
616		kfree_skb(head);
617		goto out;
618	}
619
620	/* pack up chunks */
621	pkt_count = sctp_packet_pack(packet, head, gso, gfp);
622	if (!pkt_count) {
623		kfree_skb(head);
624		goto out;
625	}
626	pr_debug("***sctp_transmit_packet*** skb->len:%d\n", head->len);
627
628	/* start autoclose timer */
629	if (packet->has_data && sctp_state(asoc, ESTABLISHED) &&
630	    asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) {
631		struct timer_list *timer =
632			&asoc->timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE];
633		unsigned long timeout =
634			asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE];
635
636		if (!mod_timer(timer, jiffies + timeout))
637			sctp_association_hold(asoc);
638	}
639
640	/* sctp xmit */
641	tp->af_specific->ecn_capable(sk);
642	if (asoc) {
643		asoc->stats.opackets += pkt_count;
644		if (asoc->peer.last_sent_to != tp)
645			asoc->peer.last_sent_to = tp;
646	}
647	head->ignore_df = packet->ipfragok;
648	if (tp->dst_pending_confirm)
649		skb_set_dst_pending_confirm(head, 1);
650	/* neighbour should be confirmed on successful transmission or
651	 * positive error
652	 */
653	if (tp->af_specific->sctp_xmit(head, tp) >= 0 &&
654	    tp->dst_pending_confirm)
655		tp->dst_pending_confirm = 0;
656
657out:
658	list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
659		list_del_init(&chunk->list);
660		if (!sctp_chunk_is_data(chunk))
661			sctp_chunk_free(chunk);
662	}
663	sctp_packet_reset(packet);
664	return 0;
665}
666
667/********************************************************************
668 * 2nd Level Abstractions
669 ********************************************************************/
670
671/* This private function checks whether a chunk can be added */
672static enum sctp_xmit sctp_packet_can_append_data(struct sctp_packet *packet,
673						  struct sctp_chunk *chunk)
674{
675	size_t datasize, rwnd, inflight, flight_size;
676	struct sctp_transport *transport = packet->transport;
677	struct sctp_association *asoc = transport->asoc;
678	struct sctp_outq *q = &asoc->outqueue;
679
680	/* RFC 2960 6.1  Transmission of DATA Chunks
681	 *
682	 * A) At any given time, the data sender MUST NOT transmit new data to
683	 * any destination transport address if its peer's rwnd indicates
684	 * that the peer has no buffer space (i.e. rwnd is 0, see Section
685	 * 6.2.1).  However, regardless of the value of rwnd (including if it
686	 * is 0), the data sender can always have one DATA chunk in flight to
687	 * the receiver if allowed by cwnd (see rule B below).  This rule
688	 * allows the sender to probe for a change in rwnd that the sender
689	 * missed due to the SACK having been lost in transit from the data
690	 * receiver to the data sender.
691	 */
692
693	rwnd = asoc->peer.rwnd;
694	inflight = q->outstanding_bytes;
695	flight_size = transport->flight_size;
696
697	datasize = sctp_data_size(chunk);
698
699	if (datasize > rwnd && inflight > 0)
700		/* We have (at least) one data chunk in flight,
701		 * so we can't fall back to rule 6.1 B).
702		 */
703		return SCTP_XMIT_RWND_FULL;
704
705	/* RFC 2960 6.1  Transmission of DATA Chunks
706	 *
707	 * B) At any given time, the sender MUST NOT transmit new data
708	 * to a given transport address if it has cwnd or more bytes
709	 * of data outstanding to that transport address.
710	 */
711	/* RFC 2960 7.2.4 & the Implementers Guide 2.8.
712	 *
713	 * 3) ...
714	 *    When a Fast Retransmit is being performed the sender SHOULD
715	 *    ignore the value of cwnd and SHOULD NOT delay retransmission.
716	 */
717	if (chunk->fast_retransmit != SCTP_NEED_FRTX &&
718	    flight_size >= transport->cwnd)
719		return SCTP_XMIT_RWND_FULL;
720
721	/* Nagle's algorithm to solve small-packet problem:
722	 * Inhibit the sending of new chunks when new outgoing data arrives
723	 * if any previously transmitted data on the connection remains
724	 * unacknowledged.
725	 */
726
727	if ((sctp_sk(asoc->base.sk)->nodelay || inflight == 0) &&
728	    !asoc->force_delay)
729		/* Nothing unacked */
730		return SCTP_XMIT_OK;
731
732	if (!sctp_packet_empty(packet))
733		/* Append to packet */
734		return SCTP_XMIT_OK;
735
736	if (!sctp_state(asoc, ESTABLISHED))
737		return SCTP_XMIT_OK;
738
739	/* Check whether this chunk and all the rest of pending data will fit
740	 * or delay in hopes of bundling a full sized packet.
741	 */
742	if (chunk->skb->len + q->out_qlen > transport->pathmtu -
743	    packet->overhead - sctp_datachk_len(&chunk->asoc->stream) - 4)
744		/* Enough data queued to fill a packet */
745		return SCTP_XMIT_OK;
746
747	/* Don't delay large message writes that may have been fragmented */
748	if (!chunk->msg->can_delay)
749		return SCTP_XMIT_OK;
750
751	/* Defer until all data acked or packet full */
752	return SCTP_XMIT_DELAY;
753}
754
755/* This private function does the bookkeeping when adding a DATA chunk */
756static void sctp_packet_append_data(struct sctp_packet *packet,
757				struct sctp_chunk *chunk)
758{
759	struct sctp_transport *transport = packet->transport;
760	size_t datasize = sctp_data_size(chunk);
761	struct sctp_association *asoc = transport->asoc;
762	u32 rwnd = asoc->peer.rwnd;
763
764	/* Keep track of how many bytes are in flight over this transport. */
765	transport->flight_size += datasize;
766
767	/* Keep track of how many bytes are in flight to the receiver. */
768	asoc->outqueue.outstanding_bytes += datasize;
769
770	/* Update our view of the receiver's rwnd. */
771	if (datasize < rwnd)
772		rwnd -= datasize;
773	else
774		rwnd = 0;
775
776	asoc->peer.rwnd = rwnd;
777	sctp_chunk_assign_tsn(chunk);
778	asoc->stream.si->assign_number(chunk);
779}
780
781static enum sctp_xmit sctp_packet_will_fit(struct sctp_packet *packet,
782					   struct sctp_chunk *chunk,
783					   u16 chunk_len)
784{
785	enum sctp_xmit retval = SCTP_XMIT_OK;
786	size_t psize, pmtu, maxsize;
787
788	/* Don't bundle in this packet if this chunk's auth key doesn't
789	 * match other chunks already enqueued on this packet. Also,
790	 * don't bundle the chunk with auth key if other chunks in this
791	 * packet don't have auth key.
792	 */
793	if ((packet->auth && chunk->shkey != packet->auth->shkey) ||
794	    (!packet->auth && chunk->shkey &&
795	     chunk->chunk_hdr->type != SCTP_CID_AUTH))
796		return SCTP_XMIT_PMTU_FULL;
797
798	psize = packet->size;
799	if (packet->transport->asoc)
800		pmtu = packet->transport->asoc->pathmtu;
801	else
802		pmtu = packet->transport->pathmtu;
803
804	/* Decide if we need to fragment or resubmit later. */
805	if (psize + chunk_len > pmtu) {
806		/* It's OK to fragment at IP level if any one of the following
807		 * is true:
808		 *	1. The packet is empty (meaning this chunk is greater
809		 *	   than the MTU)
810		 *	2. The packet doesn't have any data in it yet and data
811		 *	   requires authentication.
812		 */
813		if (sctp_packet_empty(packet) ||
814		    (!packet->has_data && chunk->auth)) {
815			/* We no longer do re-fragmentation.
816			 * Just fragment at the IP layer, if we
817			 * actually hit this condition
818			 */
819			packet->ipfragok = 1;
820			goto out;
821		}
822
823		/* Similarly, if this chunk was built before a PMTU
824		 * reduction, we have to fragment it at IP level now. So
825		 * if the packet already contains something, we need to
826		 * flush.
827		 */
828		maxsize = pmtu - packet->overhead;
829		if (packet->auth)
830			maxsize -= SCTP_PAD4(packet->auth->skb->len);
831		if (chunk_len > maxsize)
832			retval = SCTP_XMIT_PMTU_FULL;
833
834		/* It is also okay to fragment if the chunk we are
835		 * adding is a control chunk, but only if current packet
836		 * is not a GSO one otherwise it causes fragmentation of
837		 * a large frame. So in this case we allow the
838		 * fragmentation by forcing it to be in a new packet.
839		 */
840		if (!sctp_chunk_is_data(chunk) && packet->has_data)
841			retval = SCTP_XMIT_PMTU_FULL;
842
843		if (psize + chunk_len > packet->max_size)
844			/* Hit GSO/PMTU limit, gotta flush */
845			retval = SCTP_XMIT_PMTU_FULL;
846
847		if (!packet->transport->burst_limited &&
848		    psize + chunk_len > (packet->transport->cwnd >> 1))
849			/* Do not allow a single GSO packet to use more
850			 * than half of cwnd.
851			 */
852			retval = SCTP_XMIT_PMTU_FULL;
853
854		if (packet->transport->burst_limited &&
855		    psize + chunk_len > (packet->transport->burst_limited >> 1))
856			/* Do not allow a single GSO packet to use more
857			 * than half of original cwnd.
858			 */
859			retval = SCTP_XMIT_PMTU_FULL;
860		/* Otherwise it will fit in the GSO packet */
861	}
862
863out:
864	return retval;
865}