v4.6
   1/* SCTP kernel implementation
   2 * (C) Copyright IBM Corp. 2001, 2004
   3 * Copyright (c) 1999-2000 Cisco, Inc.
   4 * Copyright (c) 1999-2001 Motorola, Inc.
   5 * Copyright (c) 2001 Intel Corp.
   6 * Copyright (c) 2001 Nokia, Inc.
   7 * Copyright (c) 2001 La Monte H.P. Yarroll
   8 *
   9 * This abstraction carries sctp events to the ULP (sockets).
  10 *
  11 * This SCTP implementation is free software;
  12 * you can redistribute it and/or modify it under the terms of
  13 * the GNU General Public License as published by
  14 * the Free Software Foundation; either version 2, or (at your option)
  15 * any later version.
  16 *
  17 * This SCTP implementation is distributed in the hope that it
  18 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
  19 *                 ************************
  20 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  21 * See the GNU General Public License for more details.
  22 *
  23 * You should have received a copy of the GNU General Public License
  24 * along with GNU CC; see the file COPYING.  If not, see
  25 * <http://www.gnu.org/licenses/>.
  26 *
  27 * Please send any bug reports or fixes you make to the
  28 * email address(es):
  29 *    lksctp developers <linux-sctp@vger.kernel.org>
  30 *
  31 * Written or modified by:
  32 *    Jon Grimm             <jgrimm@us.ibm.com>
  33 *    La Monte H.P. Yarroll <piggy@acm.org>
  34 *    Sridhar Samudrala     <sri@us.ibm.com>
  35 */
  36
  37#include <linux/slab.h>
  38#include <linux/types.h>
  39#include <linux/skbuff.h>
  40#include <net/sock.h>
  41#include <net/busy_poll.h>
  42#include <net/sctp/structs.h>
  43#include <net/sctp/sctp.h>
  44#include <net/sctp/sm.h>
  45
  46/* Forward declarations for internal helpers.  */
  47static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
  48					      struct sctp_ulpevent *);
  49static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *,
  50					      struct sctp_ulpevent *);
  51static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);
  52
  53/* 1st Level Abstractions */
  54
  55/* Initialize a ULP queue from a block of memory.  */
  56struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
  57				 struct sctp_association *asoc)
  58{
  59	memset(ulpq, 0, sizeof(struct sctp_ulpq));
  60
  61	ulpq->asoc = asoc;
   62	skb_queue_head_init(&ulpq->reasm);
  63	skb_queue_head_init(&ulpq->lobby);
  64	ulpq->pd_mode  = 0;
  65
  66	return ulpq;
  67}
  68
  69
  70/* Flush the reassembly and ordering queues.  */
  71void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
  72{
  73	struct sk_buff *skb;
  74	struct sctp_ulpevent *event;
  75
  76	while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
  77		event = sctp_skb2event(skb);
  78		sctp_ulpevent_free(event);
  79	}
  80
  81	while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
  82		event = sctp_skb2event(skb);
  83		sctp_ulpevent_free(event);
  84	}
   85
  86}
  87
  88/* Dispose of a ulpqueue.  */
  89void sctp_ulpq_free(struct sctp_ulpq *ulpq)
  90{
  91	sctp_ulpq_flush(ulpq);
  92}
  93
  94/* Process an incoming DATA chunk.  */
  95int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
  96			gfp_t gfp)
  97{
  98	struct sk_buff_head temp;
  99	struct sctp_ulpevent *event;
 100	int event_eor = 0;
 101
 102	/* Create an event from the incoming chunk. */
 103	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
 104	if (!event)
 105		return -ENOMEM;
  106
 107	/* Do reassembly if needed.  */
 108	event = sctp_ulpq_reasm(ulpq, event);
 109
 110	/* Do ordering if needed.  */
 111	if ((event) && (event->msg_flags & MSG_EOR)) {
 112		/* Create a temporary list to collect chunks on.  */
 113		skb_queue_head_init(&temp);
 114		__skb_queue_tail(&temp, sctp_event2skb(event));
 115
  116		event = sctp_ulpq_order(ulpq, event);
 117	}
 118
  119	/* Send event to the ULP.  'event' is the sctp_ulpevent for the
 120	 * very first SKB on the 'temp' list.
 121	 */
 122	if (event) {
 123		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
 124		sctp_ulpq_tail_event(ulpq, event);
 125	}
 126
 127	return event_eor;
 128}
 129
 130/* Add a new event for propagation to the ULP.  */
 131/* Clear the partial delivery mode for this socket.   Note: This
 132 * assumes that no association is currently in partial delivery mode.
 133 */
 134int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
 135{
 136	struct sctp_sock *sp = sctp_sk(sk);
 137
 138	if (atomic_dec_and_test(&sp->pd_mode)) {
 139		/* This means there are no other associations in PD, so
 140		 * we can go ahead and clear out the lobby in one shot
 141		 */
 142		if (!skb_queue_empty(&sp->pd_lobby)) {
 143			struct list_head *list;
 144			sctp_skb_list_tail(&sp->pd_lobby, &sk->sk_receive_queue);
 145			list = (struct list_head *)&sctp_sk(sk)->pd_lobby;
 146			INIT_LIST_HEAD(list);
 147			return 1;
 148		}
 149	} else {
 150		/* There are other associations in PD, so we only need to
 151		 * pull stuff out of the lobby that belongs to the
  152		 * association that is exiting PD (all of its notifications
 153		 * are posted here).
 154		 */
 155		if (!skb_queue_empty(&sp->pd_lobby) && asoc) {
 156			struct sk_buff *skb, *tmp;
 157			struct sctp_ulpevent *event;
 158
 159			sctp_skb_for_each(skb, &sp->pd_lobby, tmp) {
 160				event = sctp_skb2event(skb);
 161				if (event->asoc == asoc) {
 162					__skb_unlink(skb, &sp->pd_lobby);
 163					__skb_queue_tail(&sk->sk_receive_queue,
 164							 skb);
 165				}
 166			}
 167		}
 168	}
 169
 170	return 0;
 171}
 172
 173/* Set the pd_mode on the socket and ulpq */
 174static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)
 175{
 176	struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk);
 177
 178	atomic_inc(&sp->pd_mode);
 179	ulpq->pd_mode = 1;
 180}
 181
 182/* Clear the pd_mode and restart any pending messages waiting for delivery. */
 183static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
 184{
 185	ulpq->pd_mode = 0;
 186	sctp_ulpq_reasm_drain(ulpq);
 187	return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
 188}
 189
 190/* If the SKB of 'event' is on a list, it is the first such member
 191 * of that list.
 192 */
 193int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
 194{
 195	struct sock *sk = ulpq->asoc->base.sk;
 196	struct sk_buff_head *queue, *skb_list;
  197	struct sk_buff *skb = sctp_event2skb(event);
 198	int clear_pd = 0;
 199
  200	skb_list = (struct sk_buff_head *) skb->prev;
 201
 202	/* If the socket is just going to throw this away, do not
 203	 * even try to deliver it.
 204	 */
  205	if (sock_flag(sk, SOCK_DEAD) || (sk->sk_shutdown & RCV_SHUTDOWN))
 206		goto out_free;
 207
 208	if (!sctp_ulpevent_is_notification(event)) {
 209		sk_mark_napi_id(sk, skb);
 210		sk_incoming_cpu_update(sk);
 211	}
 212	/* Check if the user wishes to receive this event.  */
 213	if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe))
 214		goto out_free;
 215
 216	/* If we are in partial delivery mode, post to the lobby until
  217	 * partial delivery is cleared, unless, of course, _this_ is
  218	 * the association that caused the partial delivery.
 219	 */
 220
 221	if (atomic_read(&sctp_sk(sk)->pd_mode) == 0) {
 222		queue = &sk->sk_receive_queue;
 223	} else {
 224		if (ulpq->pd_mode) {
 225			/* If the association is in partial delivery, we
 226			 * need to finish delivering the partially processed
 227			 * packet before passing any other data.  This is
 228			 * because we don't truly support stream interleaving.
 229			 */
 230			if ((event->msg_flags & MSG_NOTIFICATION) ||
 231			    (SCTP_DATA_NOT_FRAG ==
 232				    (event->msg_flags & SCTP_DATA_FRAG_MASK)))
 233				queue = &sctp_sk(sk)->pd_lobby;
 234			else {
 235				clear_pd = event->msg_flags & MSG_EOR;
 236				queue = &sk->sk_receive_queue;
 237			}
 238		} else {
 239			/*
 240			 * If fragment interleave is enabled, we
 241			 * can queue this to the receive queue instead
 242			 * of the lobby.
 243			 */
 244			if (sctp_sk(sk)->frag_interleave)
 245				queue = &sk->sk_receive_queue;
 246			else
 247				queue = &sctp_sk(sk)->pd_lobby;
 248		}
 249	}
 250
 251	/* If we are harvesting multiple skbs they will be
 252	 * collected on a list.
 253	 */
 254	if (skb_list)
 255		sctp_skb_list_tail(skb_list, queue);
 256	else
 257		__skb_queue_tail(queue, skb);
 258
 259	/* Did we just complete partial delivery and need to get
 260	 * rolling again?  Move pending data to the receive
 261	 * queue.
 262	 */
 263	if (clear_pd)
 264		sctp_ulpq_clear_pd(ulpq);
 265
  266	if (queue == &sk->sk_receive_queue)
  267		sk->sk_data_ready(sk);
 268	return 1;
 269
 270out_free:
 271	if (skb_list)
 272		sctp_queue_purge_ulpevents(skb_list);
 273	else
 274		sctp_ulpevent_free(event);
 275
 276	return 0;
 277}
 278
 279/* 2nd Level Abstractions */
 280
 281/* Helper function to store chunks that need to be reassembled.  */
 282static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
 283					 struct sctp_ulpevent *event)
 284{
 285	struct sk_buff *pos;
 286	struct sctp_ulpevent *cevent;
 287	__u32 tsn, ctsn;
 288
 289	tsn = event->tsn;
 290
 291	/* See if it belongs at the end. */
 292	pos = skb_peek_tail(&ulpq->reasm);
 293	if (!pos) {
 294		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
 295		return;
 296	}
 297
 298	/* Short circuit just dropping it at the end. */
 299	cevent = sctp_skb2event(pos);
 300	ctsn = cevent->tsn;
 301	if (TSN_lt(ctsn, tsn)) {
 302		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
 303		return;
 304	}
 305
 306	/* Find the right place in this list. We store them by TSN.  */
 307	skb_queue_walk(&ulpq->reasm, pos) {
 308		cevent = sctp_skb2event(pos);
 309		ctsn = cevent->tsn;
 310
 311		if (TSN_lt(tsn, ctsn))
 312			break;
 313	}
 314
 315	/* Insert before pos. */
 316	__skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
 317
 318}
 319
 320/* Helper function to return an event corresponding to the reassembled
 321 * datagram.
 322 * This routine creates a re-assembled skb given the first and last skb's
 323 * as stored in the reassembly queue. The skb's may be non-linear if the sctp
 324 * payload was fragmented on the way and ip had to reassemble them.
 325 * We add the rest of skb's to the first skb's fraglist.
 326 */
 327static struct sctp_ulpevent *sctp_make_reassembled_event(struct net *net,
 328	struct sk_buff_head *queue, struct sk_buff *f_frag,
  329	struct sk_buff *l_frag)
 330{
 331	struct sk_buff *pos;
 332	struct sk_buff *new = NULL;
 333	struct sctp_ulpevent *event;
 334	struct sk_buff *pnext, *last;
 335	struct sk_buff *list = skb_shinfo(f_frag)->frag_list;
 336
 337	/* Store the pointer to the 2nd skb */
 338	if (f_frag == l_frag)
 339		pos = NULL;
 340	else
 341		pos = f_frag->next;
 342
 343	/* Get the last skb in the f_frag's frag_list if present. */
 344	for (last = list; list; last = list, list = list->next)
 345		;
 346
  347	/* Add the list of remaining fragments to the first fragment's
 348	 * frag_list.
 349	 */
 350	if (last)
 351		last->next = pos;
 352	else {
 353		if (skb_cloned(f_frag)) {
 354			/* This is a cloned skb, we can't just modify
 355			 * the frag_list.  We need a new skb to do that.
 356			 * Instead of calling skb_unshare(), we'll do it
 357			 * ourselves since we need to delay the free.
 358			 */
 359			new = skb_copy(f_frag, GFP_ATOMIC);
 360			if (!new)
 361				return NULL;	/* try again later */
 362
 363			sctp_skb_set_owner_r(new, f_frag->sk);
 364
 365			skb_shinfo(new)->frag_list = pos;
 366		} else
 367			skb_shinfo(f_frag)->frag_list = pos;
 368	}
 369
 370	/* Remove the first fragment from the reassembly queue.  */
 371	__skb_unlink(f_frag, queue);
 372
 373	/* if we did unshare, then free the old skb and re-assign */
 374	if (new) {
 375		kfree_skb(f_frag);
 376		f_frag = new;
 377	}
 378
 379	while (pos) {
 380
 381		pnext = pos->next;
 382
 383		/* Update the len and data_len fields of the first fragment. */
 384		f_frag->len += pos->len;
 385		f_frag->data_len += pos->len;
 386
 387		/* Remove the fragment from the reassembly queue.  */
 388		__skb_unlink(pos, queue);
 389
 390		/* Break if we have reached the last fragment.  */
 391		if (pos == l_frag)
 392			break;
 393		pos->next = pnext;
 394		pos = pnext;
 395	}
 396
 397	event = sctp_skb2event(f_frag);
 398	SCTP_INC_STATS(net, SCTP_MIB_REASMUSRMSGS);
 399
 400	return event;
 401}
 402
 403
 404/* Helper function to check if an incoming chunk has filled up the last
 405 * missing fragment in a SCTP datagram and return the corresponding event.
 406 */
 407static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
 408{
 409	struct sk_buff *pos;
 410	struct sctp_ulpevent *cevent;
 411	struct sk_buff *first_frag = NULL;
 412	__u32 ctsn, next_tsn;
 413	struct sctp_ulpevent *retval = NULL;
 414	struct sk_buff *pd_first = NULL;
 415	struct sk_buff *pd_last = NULL;
 416	size_t pd_len = 0;
 417	struct sctp_association *asoc;
 418	u32 pd_point;
 419
 420	/* Initialized to 0 just to avoid compiler warning message.  Will
 421	 * never be used with this value. It is referenced only after it
 422	 * is set when we find the first fragment of a message.
 423	 */
 424	next_tsn = 0;
 425
 426	/* The chunks are held in the reasm queue sorted by TSN.
 427	 * Walk through the queue sequentially and look for a sequence of
 428	 * fragmented chunks that complete a datagram.
 429	 * 'first_frag' and next_tsn are reset when we find a chunk which
 430	 * is the first fragment of a datagram. Once these 2 fields are set
 431	 * we expect to find the remaining middle fragments and the last
 432	 * fragment in order. If not, first_frag is reset to NULL and we
 433	 * start the next pass when we find another first fragment.
 434	 *
  435	 * There is a potential to do partial delivery if the user sets the
  436	 * SCTP_PARTIAL_DELIVERY_POINT option. Let's count some things here
  437	 * to see if we can do PD.
 438	 */
 439	skb_queue_walk(&ulpq->reasm, pos) {
 440		cevent = sctp_skb2event(pos);
 441		ctsn = cevent->tsn;
 442
 443		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
 444		case SCTP_DATA_FIRST_FRAG:
 445			/* If this "FIRST_FRAG" is the first
 446			 * element in the queue, then count it towards
 447			 * possible PD.
 448			 */
 449			if (pos == ulpq->reasm.next) {
 450			    pd_first = pos;
 451			    pd_last = pos;
 452			    pd_len = pos->len;
 453			} else {
 454			    pd_first = NULL;
 455			    pd_last = NULL;
 456			    pd_len = 0;
 457			}
 458
 459			first_frag = pos;
 460			next_tsn = ctsn + 1;
 461			break;
 462
 463		case SCTP_DATA_MIDDLE_FRAG:
 464			if ((first_frag) && (ctsn == next_tsn)) {
 465				next_tsn++;
 466				if (pd_first) {
 467				    pd_last = pos;
 468				    pd_len += pos->len;
 469				}
 470			} else
 471				first_frag = NULL;
 472			break;
 473
 474		case SCTP_DATA_LAST_FRAG:
 475			if (first_frag && (ctsn == next_tsn))
 476				goto found;
 477			else
 478				first_frag = NULL;
 479			break;
 480		}
 481	}
 482
 483	asoc = ulpq->asoc;
 484	if (pd_first) {
  485		/* Make sure we can enter partial delivery.
  486		 * We can trigger partial delivery only if fragment
  487		 * interleave is set, or the socket is not already
  488		 * in partial delivery.
 489		 */
 490		if (!sctp_sk(asoc->base.sk)->frag_interleave &&
 491		    atomic_read(&sctp_sk(asoc->base.sk)->pd_mode))
 492			goto done;
 493
 494		cevent = sctp_skb2event(pd_first);
 495		pd_point = sctp_sk(asoc->base.sk)->pd_point;
 496		if (pd_point && pd_point <= pd_len) {
 497			retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
 498							     &ulpq->reasm,
 499							     pd_first,
 500							     pd_last);
 501			if (retval)
 502				sctp_ulpq_set_pd(ulpq);
 503		}
 504	}
 505done:
 506	return retval;
 507found:
 508	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
 509					     &ulpq->reasm, first_frag, pos);
 510	if (retval)
 511		retval->msg_flags |= MSG_EOR;
 512	goto done;
 513}
 514
 515/* Retrieve the next set of fragments of a partial message. */
 516static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
 517{
 518	struct sk_buff *pos, *last_frag, *first_frag;
 519	struct sctp_ulpevent *cevent;
 520	__u32 ctsn, next_tsn;
 521	int is_last;
 522	struct sctp_ulpevent *retval;
 523
 524	/* The chunks are held in the reasm queue sorted by TSN.
 525	 * Walk through the queue sequentially and look for the first
 526	 * sequence of fragmented chunks.
 527	 */
 528
 529	if (skb_queue_empty(&ulpq->reasm))
 530		return NULL;
 531
 532	last_frag = first_frag = NULL;
 533	retval = NULL;
 534	next_tsn = 0;
 535	is_last = 0;
 536
 537	skb_queue_walk(&ulpq->reasm, pos) {
 538		cevent = sctp_skb2event(pos);
 539		ctsn = cevent->tsn;
 540
 541		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
 542		case SCTP_DATA_FIRST_FRAG:
 543			if (!first_frag)
 544				return NULL;
 545			goto done;
 546		case SCTP_DATA_MIDDLE_FRAG:
 547			if (!first_frag) {
 548				first_frag = pos;
 549				next_tsn = ctsn + 1;
 550				last_frag = pos;
 551			} else if (next_tsn == ctsn) {
 552				next_tsn++;
 553				last_frag = pos;
 554			} else
 555				goto done;
 556			break;
 557		case SCTP_DATA_LAST_FRAG:
 558			if (!first_frag)
 559				first_frag = pos;
 560			else if (ctsn != next_tsn)
 561				goto done;
 562			last_frag = pos;
 563			is_last = 1;
 564			goto done;
 565		default:
 566			return NULL;
 567		}
 568	}
 569
 570	/* We have the reassembled event. There is no need to look
 571	 * further.
 572	 */
 573done:
 574	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
 575					&ulpq->reasm, first_frag, last_frag);
 576	if (retval && is_last)
 577		retval->msg_flags |= MSG_EOR;
 578
 579	return retval;
 580}
 581
 582
 583/* Helper function to reassemble chunks.  Hold chunks on the reasm queue that
 584 * need reassembling.
 585 */
 586static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
 587						struct sctp_ulpevent *event)
 588{
 589	struct sctp_ulpevent *retval = NULL;
 590
 591	/* Check if this is part of a fragmented message.  */
 592	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
 593		event->msg_flags |= MSG_EOR;
 594		return event;
 595	}
 596
 597	sctp_ulpq_store_reasm(ulpq, event);
 598	if (!ulpq->pd_mode)
 599		retval = sctp_ulpq_retrieve_reassembled(ulpq);
 600	else {
 601		__u32 ctsn, ctsnap;
 602
 603		/* Do not even bother unless this is the next tsn to
 604		 * be delivered.
 605		 */
 606		ctsn = event->tsn;
 607		ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
 608		if (TSN_lte(ctsn, ctsnap))
 609			retval = sctp_ulpq_retrieve_partial(ulpq);
 610	}
 611
 612	return retval;
 613}
 614
 615/* Retrieve the first part (sequential fragments) for partial delivery.  */
 616static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
 617{
 618	struct sk_buff *pos, *last_frag, *first_frag;
 619	struct sctp_ulpevent *cevent;
 620	__u32 ctsn, next_tsn;
 621	struct sctp_ulpevent *retval;
 622
 623	/* The chunks are held in the reasm queue sorted by TSN.
 624	 * Walk through the queue sequentially and look for a sequence of
 625	 * fragmented chunks that start a datagram.
 626	 */
 627
 628	if (skb_queue_empty(&ulpq->reasm))
 629		return NULL;
 630
 631	last_frag = first_frag = NULL;
 632	retval = NULL;
 633	next_tsn = 0;
 634
 635	skb_queue_walk(&ulpq->reasm, pos) {
 636		cevent = sctp_skb2event(pos);
 637		ctsn = cevent->tsn;
 638
 639		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
 640		case SCTP_DATA_FIRST_FRAG:
 641			if (!first_frag) {
 642				first_frag = pos;
 643				next_tsn = ctsn + 1;
 644				last_frag = pos;
 645			} else
 646				goto done;
 647			break;
 648
 649		case SCTP_DATA_MIDDLE_FRAG:
 650			if (!first_frag)
 651				return NULL;
 652			if (ctsn == next_tsn) {
 653				next_tsn++;
 654				last_frag = pos;
 655			} else
 656				goto done;
 657			break;
 658
 659		case SCTP_DATA_LAST_FRAG:
 660			if (!first_frag)
 661				return NULL;
 662			else
 663				goto done;
 664			break;
 665
 666		default:
 667			return NULL;
 668		}
 669	}
 670
 671	/* We have the reassembled event. There is no need to look
 672	 * further.
 673	 */
 674done:
 675	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
 676					&ulpq->reasm, first_frag, last_frag);
 677	return retval;
 678}
 679
 680/*
 681 * Flush out stale fragments from the reassembly queue when processing
 682 * a Forward TSN.
 683 *
 684 * RFC 3758, Section 3.6
 685 *
 686 * After receiving and processing a FORWARD TSN, the data receiver MUST
 687 * take cautions in updating its re-assembly queue.  The receiver MUST
 688 * remove any partially reassembled message, which is still missing one
 689 * or more TSNs earlier than or equal to the new cumulative TSN point.
 690 * In the event that the receiver has invoked the partial delivery API,
 691 * a notification SHOULD also be generated to inform the upper layer API
 692 * that the message being partially delivered will NOT be completed.
 693 */
 694void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
 695{
 696	struct sk_buff *pos, *tmp;
 697	struct sctp_ulpevent *event;
 698	__u32 tsn;
 699
 700	if (skb_queue_empty(&ulpq->reasm))
 701		return;
 702
 703	skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
 704		event = sctp_skb2event(pos);
 705		tsn = event->tsn;
 706
 707		/* Since the entire message must be abandoned by the
 708		 * sender (item A3 in Section 3.5, RFC 3758), we can
  709		 * free all fragments on the list that are less than
 710		 * or equal to ctsn_point
 711		 */
 712		if (TSN_lte(tsn, fwd_tsn)) {
 713			__skb_unlink(pos, &ulpq->reasm);
 714			sctp_ulpevent_free(event);
 715		} else
 716			break;
 717	}
 718}
 719
 720/*
  721 * Drain the reassembly queue.  If we just cleared partial delivery, it
 722 * is possible that the reassembly queue will contain already reassembled
 723 * messages.  Retrieve any such messages and give them to the user.
 724 */
 725static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
 726{
 727	struct sctp_ulpevent *event = NULL;
 728	struct sk_buff_head temp;
 729
 730	if (skb_queue_empty(&ulpq->reasm))
 731		return;
 732
 733	while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
 734		/* Do ordering if needed.  */
 735		if ((event) && (event->msg_flags & MSG_EOR)) {
 736			skb_queue_head_init(&temp);
 737			__skb_queue_tail(&temp, sctp_event2skb(event));
  738			__skb_queue_tail(&temp, sctp_event2skb(event));
 739			event = sctp_ulpq_order(ulpq, event);
 740		}
 741
 742		/* Send event to the ULP.  'event' is the
  743		 * sctp_ulpevent for the very first SKB on the 'temp' list.
 744		 */
 745		if (event)
 746			sctp_ulpq_tail_event(ulpq, event);
 747	}
 748}
 749
 750
 751/* Helper function to gather skbs that have possibly become
  752 * ordered by an incoming chunk.
 753 */
 754static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
 755					      struct sctp_ulpevent *event)
 756{
 757	struct sk_buff_head *event_list;
 758	struct sk_buff *pos, *tmp;
 759	struct sctp_ulpevent *cevent;
 760	struct sctp_stream *in;
 761	__u16 sid, csid, cssn;
 762
 763	sid = event->stream;
 764	in  = &ulpq->asoc->ssnmap->in;
 765
 766	event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;
 767
 768	/* We are holding the chunks by stream, by SSN.  */
 769	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
 770		cevent = (struct sctp_ulpevent *) pos->cb;
 771		csid = cevent->stream;
 772		cssn = cevent->ssn;
 773
 774		/* Have we gone too far?  */
 775		if (csid > sid)
 776			break;
 777
 778		/* Have we not gone far enough?  */
 779		if (csid < sid)
 780			continue;
 781
 782		if (cssn != sctp_ssn_peek(in, sid))
 783			break;
 784
 785		/* Found it, so mark in the ssnmap. */
 786		sctp_ssn_next(in, sid);
 787
 788		__skb_unlink(pos, &ulpq->lobby);
 789
 790		/* Attach all gathered skbs to the event.  */
 791		__skb_queue_tail(event_list, pos);
 792	}
 793}
 794
 795/* Helper function to store chunks needing ordering.  */
 796static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
 797					   struct sctp_ulpevent *event)
 798{
 799	struct sk_buff *pos;
 800	struct sctp_ulpevent *cevent;
 801	__u16 sid, csid;
 802	__u16 ssn, cssn;
 803
 804	pos = skb_peek_tail(&ulpq->lobby);
 805	if (!pos) {
 806		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
 807		return;
 808	}
 809
 810	sid = event->stream;
 811	ssn = event->ssn;
 812
 813	cevent = (struct sctp_ulpevent *) pos->cb;
 814	csid = cevent->stream;
 815	cssn = cevent->ssn;
 816	if (sid > csid) {
 817		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
 818		return;
 819	}
 820
 821	if ((sid == csid) && SSN_lt(cssn, ssn)) {
 822		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
 823		return;
 824	}
 825
 826	/* Find the right place in this list.  We store them by
 827	 * stream ID and then by SSN.
 828	 */
 829	skb_queue_walk(&ulpq->lobby, pos) {
 830		cevent = (struct sctp_ulpevent *) pos->cb;
 831		csid = cevent->stream;
 832		cssn = cevent->ssn;
 833
 834		if (csid > sid)
 835			break;
 836		if (csid == sid && SSN_lt(ssn, cssn))
 837			break;
 838	}
 839
 840
 841	/* Insert before pos. */
 842	__skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
 843}
 844
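/* Order a data event by stream ID and SSN.  Unordered data passes through
 * untouched; an in-sequence event advances the expected SSN and pulls any
 * now-deliverable events off the lobby; an out-of-sequence event is held
 * on the lobby and NULL is returned.
 */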
 845static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
 846					     struct sctp_ulpevent *event)
 847{
 848	__u16 sid, ssn;
 849	struct sctp_stream *in;
 850
 851	/* Check if this message needs ordering.  */
 852	if (SCTP_DATA_UNORDERED & event->msg_flags)
 853		return event;
 854
 855	/* Note: The stream ID must be verified before this routine.  */
 856	sid = event->stream;
 857	ssn = event->ssn;
 858	in  = &ulpq->asoc->ssnmap->in;
 859
 860	/* Is this the expected SSN for this stream ID?  */
 861	if (ssn != sctp_ssn_peek(in, sid)) {
 862		/* We've received something out of order, so find where it
 863		 * needs to be placed.  We order by stream and then by SSN.
 864		 */
 865		sctp_ulpq_store_ordered(ulpq, event);
 866		return NULL;
 867	}
 868
 869	/* Mark that the next chunk has been found.  */
 870	sctp_ssn_next(in, sid);
 871
 872	/* Go find any other chunks that were waiting for
 873	 * ordering.
 874	 */
 875	sctp_ulpq_retrieve_ordered(ulpq, event);
 876
 877	return event;
 878}
 879
 880/* Helper function to gather skbs that have possibly become
 881 * ordered by forward tsn skipping their dependencies.
 882 */
 883static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
 884{
 885	struct sk_buff *pos, *tmp;
 886	struct sctp_ulpevent *cevent;
 887	struct sctp_ulpevent *event;
 888	struct sctp_stream *in;
 889	struct sk_buff_head temp;
 890	struct sk_buff_head *lobby = &ulpq->lobby;
 891	__u16 csid, cssn;
 892
 893	in  = &ulpq->asoc->ssnmap->in;
 894
 895	/* We are holding the chunks by stream, by SSN.  */
 896	skb_queue_head_init(&temp);
 897	event = NULL;
 898	sctp_skb_for_each(pos, lobby, tmp) {
 899		cevent = (struct sctp_ulpevent *) pos->cb;
 900		csid = cevent->stream;
 901		cssn = cevent->ssn;
 902
 903		/* Have we gone too far?  */
 904		if (csid > sid)
 905			break;
 906
 907		/* Have we not gone far enough?  */
 908		if (csid < sid)
 909			continue;
 910
 911		/* see if this ssn has been marked by skipping */
 912		if (!SSN_lt(cssn, sctp_ssn_peek(in, csid)))
 913			break;
 914
 915		__skb_unlink(pos, lobby);
 916		if (!event)
 917			/* Create a temporary list to collect chunks on.  */
 918			event = sctp_skb2event(pos);
 919
 920		/* Attach all gathered skbs to the event.  */
 921		__skb_queue_tail(&temp, pos);
 922	}
 923
 924	/* If we didn't reap any data, see if the next expected SSN
 925	 * is next on the queue and if so, use that.
 926	 */
 927	if (event == NULL && pos != (struct sk_buff *)lobby) {
 928		cevent = (struct sctp_ulpevent *) pos->cb;
 929		csid = cevent->stream;
 930		cssn = cevent->ssn;
 931
 932		if (csid == sid && cssn == sctp_ssn_peek(in, csid)) {
 933			sctp_ssn_next(in, csid);
 934			__skb_unlink(pos, lobby);
 935			__skb_queue_tail(&temp, pos);
 936			event = sctp_skb2event(pos);
 937		}
 938	}
 939
  940	/* Send event to the ULP.  'event' is the sctp_ulpevent for the
 941	 * very first SKB on the 'temp' list.
 942	 */
 943	if (event) {
 944		/* see if we have more ordered that we can deliver */
 945		sctp_ulpq_retrieve_ordered(ulpq, event);
 946		sctp_ulpq_tail_event(ulpq, event);
 947	}
 948}
 949
 950/* Skip over an SSN. This is used during the processing of
  951 * a Forward TSN chunk to skip over the abandoned ordered data
 952 */
 953void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
 954{
 955	struct sctp_stream *in;
 956
 957	/* Note: The stream ID must be verified before this routine.  */
 958	in  = &ulpq->asoc->ssnmap->in;
 959
 960	/* Is this an old SSN?  If so ignore. */
 961	if (SSN_lt(ssn, sctp_ssn_peek(in, sid)))
 962		return;
 963
 964	/* Mark that we are no longer expecting this SSN or lower. */
 965	sctp_ssn_skip(in, sid, ssn);
 966
 967	/* Go find any other chunks that were waiting for
 968	 * ordering and deliver them if needed.
 969	 */
 970	sctp_ulpq_reap_ordered(ulpq, sid);
 971}
 972
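/* Renege events from the tail of 'list' until at least 'needed' bytes have
 * been freed, never reneging at or below the Cumulative TSN ACK Point.
 * Returns the number of bytes actually freed.
 */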
 973static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
 974		struct sk_buff_head *list, __u16 needed)
 975{
 976	__u16 freed = 0;
 977	__u32 tsn, last_tsn;
 978	struct sk_buff *skb, *flist, *last;
 979	struct sctp_ulpevent *event;
 980	struct sctp_tsnmap *tsnmap;
 981
 982	tsnmap = &ulpq->asoc->peer.tsn_map;
 983
 984	while ((skb = skb_peek_tail(list)) != NULL) {
 985		event = sctp_skb2event(skb);
 986		tsn = event->tsn;
 987
 988		/* Don't renege below the Cumulative TSN ACK Point. */
 989		if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap)))
 990			break;
 991
 992		/* Events in ordering queue may have multiple fragments
 993		 * corresponding to additional TSNs.  Sum the total
 994		 * freed space; find the last TSN.
 995		 */
 996		freed += skb_headlen(skb);
 997		flist = skb_shinfo(skb)->frag_list;
 998		for (last = flist; flist; flist = flist->next) {
 999			last = flist;
1000			freed += skb_headlen(last);
1001		}
1002		if (last)
1003			last_tsn = sctp_skb2event(last)->tsn;
1004		else
1005			last_tsn = tsn;
1006
1007		/* Unlink the event, then renege all applicable TSNs. */
1008		__skb_unlink(skb, list);
1009		sctp_ulpevent_free(event);
1010		while (TSN_lte(tsn, last_tsn)) {
1011			sctp_tsnmap_renege(tsnmap, tsn);
1012			tsn++;
1013		}
1014		if (freed >= needed)
1015			return freed;
1016	}
1017
1018	return freed;
1019}
1020
1021/* Renege 'needed' bytes from the ordering queue. */
1022static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
1023{
1024	return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
1025}
1026
1027/* Renege 'needed' bytes from the reassembly queue. */
1028static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
1029{
1030	return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed);
1031}
1032
 1033/* Partially deliver the first message as there is pressure on rwnd. */
1034void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
1035				gfp_t gfp)
1036{
1037	struct sctp_ulpevent *event;
1038	struct sctp_association *asoc;
1039	struct sctp_sock *sp;
1040	__u32 ctsn;
1041	struct sk_buff *skb;
1042
1043	asoc = ulpq->asoc;
1044	sp = sctp_sk(asoc->base.sk);
1045
1046	/* If the association is already in Partial Delivery mode
1047	 * we have nothing to do.
1048	 */
1049	if (ulpq->pd_mode)
1050		return;
1051
1052	/* Data must be at or below the Cumulative TSN ACK Point to
1053	 * start partial delivery.
1054	 */
1055	skb = skb_peek(&asoc->ulpq.reasm);
1056	if (skb != NULL) {
1057		ctsn = sctp_skb2event(skb)->tsn;
1058		if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
1059			return;
1060	}
1061
1062	/* If the user enabled fragment interleave socket option,
1063	 * multiple associations can enter partial delivery.
1064	 * Otherwise, we can only enter partial delivery if the
1065	 * socket is not in partial deliver mode.
1066	 */
1067	if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) {
1068		/* Is partial delivery possible?  */
1069		event = sctp_ulpq_retrieve_first(ulpq);
1070		/* Send event to the ULP.   */
1071		if (event) {
 1072			sctp_ulpq_tail_event(ulpq, event);
1073			sctp_ulpq_set_pd(ulpq);
1074			return;
1075		}
1076	}
1077}
1078
1079/* Renege some packets to make room for an incoming chunk.  */
1080void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
1081		      gfp_t gfp)
1082{
1083	struct sctp_association *asoc;
1084	__u16 needed, freed;
1085
1086	asoc = ulpq->asoc;
1087
1088	if (chunk) {
1089		needed = ntohs(chunk->chunk_hdr->length);
1090		needed -= sizeof(sctp_data_chunk_t);
1091	} else
1092		needed = SCTP_DEFAULT_MAXWINDOW;
1093
1094	freed = 0;
1095
1096	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
1097		freed = sctp_ulpq_renege_order(ulpq, needed);
1098		if (freed < needed) {
1099			freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
1100		}
1101	}
1102	/* If able to free enough room, accept this chunk. */
1103	if (chunk && (freed >= needed)) {
1104		int retval;
1105		retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
1106		/*
1107		 * Enter partial delivery if chunk has not been
1108		 * delivered; otherwise, drain the reassembly queue.
1109		 */
1110		if (retval <= 0)
1111			sctp_ulpq_partial_delivery(ulpq, gfp);
1112		else if (retval == 1)
1113			sctp_ulpq_reasm_drain(ulpq);
1114	}
1115
1116	sk_mem_reclaim(asoc->base.sk);
1117}
1118
1119
1120
1121/* Notify the application if an association is aborted and in
1122 * partial delivery mode.  Send up any pending received messages.
1123 */
1124void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
1125{
 1126	struct sctp_ulpevent *ev = NULL;
1127	struct sock *sk;
1128
1129	if (!ulpq->pd_mode)
1130		return;
1131
1132	sk = ulpq->asoc->base.sk;
1133	if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
 1134				       &sctp_sk(sk)->subscribe))
1135		ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
1136					      SCTP_PARTIAL_DELIVERY_ABORTED,
1137					      gfp);
1138	if (ev)
1139		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));
1140
1141	/* If there is data waiting, send it up the socket now. */
 1142	if (sctp_ulpq_clear_pd(ulpq) || ev)
 1143		sk->sk_data_ready(sk);
1144}
v6.9.4
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/* SCTP kernel implementation
   3 * (C) Copyright IBM Corp. 2001, 2004
   4 * Copyright (c) 1999-2000 Cisco, Inc.
   5 * Copyright (c) 1999-2001 Motorola, Inc.
   6 * Copyright (c) 2001 Intel Corp.
   7 * Copyright (c) 2001 Nokia, Inc.
   8 * Copyright (c) 2001 La Monte H.P. Yarroll
   9 *
  10 * This abstraction carries sctp events to the ULP (sockets).
   11 *
  12 * Please send any bug reports or fixes you make to the
  13 * email address(es):
  14 *    lksctp developers <linux-sctp@vger.kernel.org>
  15 *
  16 * Written or modified by:
  17 *    Jon Grimm             <jgrimm@us.ibm.com>
  18 *    La Monte H.P. Yarroll <piggy@acm.org>
  19 *    Sridhar Samudrala     <sri@us.ibm.com>
  20 */
  21
  22#include <linux/slab.h>
  23#include <linux/types.h>
  24#include <linux/skbuff.h>
  25#include <net/sock.h>
  26#include <net/busy_poll.h>
  27#include <net/sctp/structs.h>
  28#include <net/sctp/sctp.h>
  29#include <net/sctp/sm.h>
  30
  31/* Forward declarations for internal helpers.  */
  32static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
  33					      struct sctp_ulpevent *);
  34static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *,
  35					      struct sctp_ulpevent *);
  36static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);
  37
  38/* 1st Level Abstractions */
  39
  40/* Initialize a ULP queue from a block of memory.  */
   41void sctp_ulpq_init(struct sctp_ulpq *ulpq, struct sctp_association *asoc)
  42{
  43	memset(ulpq, 0, sizeof(struct sctp_ulpq));
  44
  45	ulpq->asoc = asoc;
  46	skb_queue_head_init(&ulpq->reasm);
  47	skb_queue_head_init(&ulpq->reasm_uo);
  48	skb_queue_head_init(&ulpq->lobby);
   49	ulpq->pd_mode  = 0;
  50}
  51
  52
  53/* Flush the reassembly and ordering queues.  */
  54void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
  55{
  56	struct sk_buff *skb;
  57	struct sctp_ulpevent *event;
  58
  59	while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
  60		event = sctp_skb2event(skb);
  61		sctp_ulpevent_free(event);
  62	}
  63
  64	while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
  65		event = sctp_skb2event(skb);
  66		sctp_ulpevent_free(event);
  67	}
  68
  69	while ((skb = __skb_dequeue(&ulpq->reasm_uo)) != NULL) {
  70		event = sctp_skb2event(skb);
  71		sctp_ulpevent_free(event);
  72	}
  73}
  74
  75/* Dispose of a ulpqueue.  */
  76void sctp_ulpq_free(struct sctp_ulpq *ulpq)
  77{
  78	sctp_ulpq_flush(ulpq);
  79}
  80
  81/* Process an incoming DATA chunk.  */
  82int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
  83			gfp_t gfp)
  84{
  85	struct sk_buff_head temp;
  86	struct sctp_ulpevent *event;
  87	int event_eor = 0;
  88
  89	/* Create an event from the incoming chunk. */
  90	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
  91	if (!event)
  92		return -ENOMEM;
  93
  94	event->ssn = ntohs(chunk->subh.data_hdr->ssn);
  95	event->ppid = chunk->subh.data_hdr->ppid;
  96
  97	/* Do reassembly if needed.  */
  98	event = sctp_ulpq_reasm(ulpq, event);
  99
 100	/* Do ordering if needed.  */
 101	if (event) {
 102		/* Create a temporary list to collect chunks on.  */
 103		skb_queue_head_init(&temp);
 104		__skb_queue_tail(&temp, sctp_event2skb(event));
 105
 106		if (event->msg_flags & MSG_EOR)
 107			event = sctp_ulpq_order(ulpq, event);
 108	}
 109
  110	/* Send event to the ULP.  'event' is the sctp_ulpevent for the
 111	 * very first SKB on the 'temp' list.
 112	 */
 113	if (event) {
 114		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
 115		sctp_ulpq_tail_event(ulpq, &temp);
 116	}
 117
 118	return event_eor;
 119}
 120
 121/* Add a new event for propagation to the ULP.  */
 122/* Clear the partial delivery mode for this socket.   Note: This
 123 * assumes that no association is currently in partial delivery mode.
 124 */
 125int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
 126{
 127	struct sctp_sock *sp = sctp_sk(sk);
 128
 129	if (atomic_dec_and_test(&sp->pd_mode)) {
 130		/* This means there are no other associations in PD, so
 131		 * we can go ahead and clear out the lobby in one shot
 132		 */
 133		if (!skb_queue_empty(&sp->pd_lobby)) {
 134			skb_queue_splice_tail_init(&sp->pd_lobby,
  135						   &sk->sk_receive_queue);
 136			return 1;
 137		}
 138	} else {
 139		/* There are other associations in PD, so we only need to
 140		 * pull stuff out of the lobby that belongs to the
  141		 * association that is exiting PD (all of its notifications
 142		 * are posted here).
 143		 */
 144		if (!skb_queue_empty(&sp->pd_lobby) && asoc) {
 145			struct sk_buff *skb, *tmp;
 146			struct sctp_ulpevent *event;
 147
 148			sctp_skb_for_each(skb, &sp->pd_lobby, tmp) {
 149				event = sctp_skb2event(skb);
 150				if (event->asoc == asoc) {
 151					__skb_unlink(skb, &sp->pd_lobby);
 152					__skb_queue_tail(&sk->sk_receive_queue,
 153							 skb);
 154				}
 155			}
 156		}
 157	}
 158
 159	return 0;
 160}
 161
 162/* Set the pd_mode on the socket and ulpq */
 163static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)
 164{
 165	struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk);
 166
 167	atomic_inc(&sp->pd_mode);
 168	ulpq->pd_mode = 1;
 169}
 170
 171/* Clear the pd_mode and restart any pending messages waiting for delivery. */
 172static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
 173{
 174	ulpq->pd_mode = 0;
 175	sctp_ulpq_reasm_drain(ulpq);
 176	return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
 177}
 178
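/* Deliver the events on 'skb_list' to the ULP, splicing them onto the
 * socket receive queue or the partial-delivery lobby as appropriate.
 * Returns 1 on delivery, 0 if the events were dropped.
 */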
  179int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sk_buff_head *skb_list)
 180{
 181	struct sock *sk = ulpq->asoc->base.sk;
 182	struct sctp_sock *sp = sctp_sk(sk);
 183	struct sctp_ulpevent *event;
 184	struct sk_buff_head *queue;
 185	struct sk_buff *skb;
 186	int clear_pd = 0;
 187
 188	skb = __skb_peek(skb_list);
 189	event = sctp_skb2event(skb);
 190
 191	/* If the socket is just going to throw this away, do not
 192	 * even try to deliver it.
 193	 */
 194	if (sk->sk_shutdown & RCV_SHUTDOWN &&
 195	    (sk->sk_shutdown & SEND_SHUTDOWN ||
 196	     !sctp_ulpevent_is_notification(event)))
 197		goto out_free;
 198
 199	if (!sctp_ulpevent_is_notification(event)) {
 200		sk_mark_napi_id(sk, skb);
 201		sk_incoming_cpu_update(sk);
 202	}
 203	/* Check if the user wishes to receive this event.  */
 204	if (!sctp_ulpevent_is_enabled(event, ulpq->asoc->subscribe))
 205		goto out_free;
 206
 207	/* If we are in partial delivery mode, post to the lobby until
  208	 * partial delivery is cleared, unless, of course, _this_ is
  209	 * the association that caused the partial delivery.
 210	 */
 211
 212	if (atomic_read(&sp->pd_mode) == 0) {
 213		queue = &sk->sk_receive_queue;
 214	} else {
 215		if (ulpq->pd_mode) {
 216			/* If the association is in partial delivery, we
 217			 * need to finish delivering the partially processed
 218			 * packet before passing any other data.  This is
 219			 * because we don't truly support stream interleaving.
 220			 */
 221			if ((event->msg_flags & MSG_NOTIFICATION) ||
 222			    (SCTP_DATA_NOT_FRAG ==
 223				    (event->msg_flags & SCTP_DATA_FRAG_MASK)))
 224				queue = &sp->pd_lobby;
 225			else {
 226				clear_pd = event->msg_flags & MSG_EOR;
 227				queue = &sk->sk_receive_queue;
 228			}
 229		} else {
 230			/*
 231			 * If fragment interleave is enabled, we
 232			 * can queue this to the receive queue instead
 233			 * of the lobby.
 234			 */
 235			if (sp->frag_interleave)
 236				queue = &sk->sk_receive_queue;
 237			else
 238				queue = &sp->pd_lobby;
 239		}
 240	}
 241
  242	skb_queue_splice_tail_init(skb_list, queue);
 243
 244	/* Did we just complete partial delivery and need to get
 245	 * rolling again?  Move pending data to the receive
 246	 * queue.
 247	 */
 248	if (clear_pd)
 249		sctp_ulpq_clear_pd(ulpq);
 250
 251	if (queue == &sk->sk_receive_queue && !sp->data_ready_signalled) {
 252		if (!sock_owned_by_user(sk))
 253			sp->data_ready_signalled = 1;
 254		sk->sk_data_ready(sk);
 255	}
 256	return 1;
 257
 258out_free:
  259	sctp_queue_purge_ulpevents(skb_list);
 260
 261	return 0;
 262}
 263
 264/* 2nd Level Abstractions */
 265
 266/* Helper function to store chunks that need to be reassembled.  */
 267static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
 268					 struct sctp_ulpevent *event)
 269{
 270	struct sk_buff *pos;
 271	struct sctp_ulpevent *cevent;
 272	__u32 tsn, ctsn;
 273
 274	tsn = event->tsn;
 275
 276	/* See if it belongs at the end. */
 277	pos = skb_peek_tail(&ulpq->reasm);
 278	if (!pos) {
 279		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
 280		return;
 281	}
 282
 283	/* Short circuit just dropping it at the end. */
 284	cevent = sctp_skb2event(pos);
 285	ctsn = cevent->tsn;
 286	if (TSN_lt(ctsn, tsn)) {
 287		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
 288		return;
 289	}
 290
 291	/* Find the right place in this list. We store them by TSN.  */
 292	skb_queue_walk(&ulpq->reasm, pos) {
 293		cevent = sctp_skb2event(pos);
 294		ctsn = cevent->tsn;
 295
 296		if (TSN_lt(tsn, ctsn))
 297			break;
 298	}
 299
 300	/* Insert before pos. */
 301	__skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
 302
 303}
 304
 305/* Helper function to return an event corresponding to the reassembled
 306 * datagram.
 307 * This routine creates a re-assembled skb given the first and last skb's
 308 * as stored in the reassembly queue. The skb's may be non-linear if the sctp
 309 * payload was fragmented on the way and ip had to reassemble them.
 310 * We add the rest of skb's to the first skb's fraglist.
 311 */
 312struct sctp_ulpevent *sctp_make_reassembled_event(struct net *net,
 313						  struct sk_buff_head *queue,
 314						  struct sk_buff *f_frag,
 315						  struct sk_buff *l_frag)
 316{
 317	struct sk_buff *pos;
 318	struct sk_buff *new = NULL;
 319	struct sctp_ulpevent *event;
 320	struct sk_buff *pnext, *last;
 321	struct sk_buff *list = skb_shinfo(f_frag)->frag_list;
 322
 323	/* Store the pointer to the 2nd skb */
 324	if (f_frag == l_frag)
 325		pos = NULL;
 326	else
 327		pos = f_frag->next;
 328
 329	/* Get the last skb in the f_frag's frag_list if present. */
 330	for (last = list; list; last = list, list = list->next)
 331		;
 332
  333	/* Add the list of remaining fragments to the first fragment's
 334	 * frag_list.
 335	 */
 336	if (last)
 337		last->next = pos;
 338	else {
 339		if (skb_cloned(f_frag)) {
 340			/* This is a cloned skb, we can't just modify
 341			 * the frag_list.  We need a new skb to do that.
 342			 * Instead of calling skb_unshare(), we'll do it
 343			 * ourselves since we need to delay the free.
 344			 */
 345			new = skb_copy(f_frag, GFP_ATOMIC);
 346			if (!new)
 347				return NULL;	/* try again later */
 348
 349			sctp_skb_set_owner_r(new, f_frag->sk);
 350
 351			skb_shinfo(new)->frag_list = pos;
 352		} else
 353			skb_shinfo(f_frag)->frag_list = pos;
 354	}
 355
 356	/* Remove the first fragment from the reassembly queue.  */
 357	__skb_unlink(f_frag, queue);
 358
 359	/* if we did unshare, then free the old skb and re-assign */
 360	if (new) {
 361		kfree_skb(f_frag);
 362		f_frag = new;
 363	}
 364
 365	while (pos) {
 366
 367		pnext = pos->next;
 368
 369		/* Update the len and data_len fields of the first fragment. */
 370		f_frag->len += pos->len;
 371		f_frag->data_len += pos->len;
 372
 373		/* Remove the fragment from the reassembly queue.  */
 374		__skb_unlink(pos, queue);
 375
 376		/* Break if we have reached the last fragment.  */
 377		if (pos == l_frag)
 378			break;
 379		pos->next = pnext;
 380		pos = pnext;
 381	}
 382
 383	event = sctp_skb2event(f_frag);
 384	SCTP_INC_STATS(net, SCTP_MIB_REASMUSRMSGS);
 385
 386	return event;
 387}
 388
 389
 390/* Helper function to check if an incoming chunk has filled up the last
 391 * missing fragment in a SCTP datagram and return the corresponding event.
 392 */
 393static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
 394{
 395	struct sk_buff *pos;
 396	struct sctp_ulpevent *cevent;
 397	struct sk_buff *first_frag = NULL;
 398	__u32 ctsn, next_tsn;
 399	struct sctp_ulpevent *retval = NULL;
 400	struct sk_buff *pd_first = NULL;
 401	struct sk_buff *pd_last = NULL;
 402	size_t pd_len = 0;
 403	struct sctp_association *asoc;
 404	u32 pd_point;
 405
 406	/* Initialized to 0 just to avoid compiler warning message.  Will
 407	 * never be used with this value. It is referenced only after it
 408	 * is set when we find the first fragment of a message.
 409	 */
 410	next_tsn = 0;
 411
 412	/* The chunks are held in the reasm queue sorted by TSN.
 413	 * Walk through the queue sequentially and look for a sequence of
 414	 * fragmented chunks that complete a datagram.
 415	 * 'first_frag' and next_tsn are reset when we find a chunk which
 416	 * is the first fragment of a datagram. Once these 2 fields are set
 417	 * we expect to find the remaining middle fragments and the last
 418	 * fragment in order. If not, first_frag is reset to NULL and we
 419	 * start the next pass when we find another first fragment.
 420	 *
  421	 * There is a potential to do partial delivery if the user sets the
  422	 * SCTP_PARTIAL_DELIVERY_POINT option. Let's count some things here
  423	 * to see if we can do PD.
 424	 */
 425	skb_queue_walk(&ulpq->reasm, pos) {
 426		cevent = sctp_skb2event(pos);
 427		ctsn = cevent->tsn;
 428
 429		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
 430		case SCTP_DATA_FIRST_FRAG:
 431			/* If this "FIRST_FRAG" is the first
 432			 * element in the queue, then count it towards
 433			 * possible PD.
 434			 */
 435			if (skb_queue_is_first(&ulpq->reasm, pos)) {
 436			    pd_first = pos;
 437			    pd_last = pos;
 438			    pd_len = pos->len;
 439			} else {
 440			    pd_first = NULL;
 441			    pd_last = NULL;
 442			    pd_len = 0;
 443			}
 444
 445			first_frag = pos;
 446			next_tsn = ctsn + 1;
 447			break;
 448
 449		case SCTP_DATA_MIDDLE_FRAG:
 450			if ((first_frag) && (ctsn == next_tsn)) {
 451				next_tsn++;
 452				if (pd_first) {
 453				    pd_last = pos;
 454				    pd_len += pos->len;
 455				}
 456			} else
 457				first_frag = NULL;
 458			break;
 459
 460		case SCTP_DATA_LAST_FRAG:
 461			if (first_frag && (ctsn == next_tsn))
 462				goto found;
 463			else
 464				first_frag = NULL;
 465			break;
 466		}
 467	}
 468
 469	asoc = ulpq->asoc;
 470	if (pd_first) {
  471		/* Make sure we can enter partial delivery.
  472		 * We can trigger partial delivery only if fragment
  473		 * interleave is set, or the socket is not already
  474		 * in partial delivery.
 475		 */
 476		if (!sctp_sk(asoc->base.sk)->frag_interleave &&
 477		    atomic_read(&sctp_sk(asoc->base.sk)->pd_mode))
 478			goto done;
 479
 480		cevent = sctp_skb2event(pd_first);
 481		pd_point = sctp_sk(asoc->base.sk)->pd_point;
 482		if (pd_point && pd_point <= pd_len) {
 483			retval = sctp_make_reassembled_event(asoc->base.net,
 484							     &ulpq->reasm,
  485							     pd_first, pd_last);
 486			if (retval)
 487				sctp_ulpq_set_pd(ulpq);
 488		}
 489	}
 490done:
 491	return retval;
 492found:
 493	retval = sctp_make_reassembled_event(ulpq->asoc->base.net,
 494					     &ulpq->reasm, first_frag, pos);
 495	if (retval)
 496		retval->msg_flags |= MSG_EOR;
 497	goto done;
 498}
 499
 500/* Retrieve the next set of fragments of a partial message. */
 501static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
 502{
 503	struct sk_buff *pos, *last_frag, *first_frag;
 504	struct sctp_ulpevent *cevent;
 505	__u32 ctsn, next_tsn;
 506	int is_last;
 507	struct sctp_ulpevent *retval;
 508
 509	/* The chunks are held in the reasm queue sorted by TSN.
 510	 * Walk through the queue sequentially and look for the first
 511	 * sequence of fragmented chunks.
 512	 */
 513
 514	if (skb_queue_empty(&ulpq->reasm))
 515		return NULL;
 516
 517	last_frag = first_frag = NULL;
 518	retval = NULL;
 519	next_tsn = 0;
 520	is_last = 0;
 521
 522	skb_queue_walk(&ulpq->reasm, pos) {
 523		cevent = sctp_skb2event(pos);
 524		ctsn = cevent->tsn;
 525
 526		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
 527		case SCTP_DATA_FIRST_FRAG:
 528			if (!first_frag)
 529				return NULL;
 530			goto done;
 531		case SCTP_DATA_MIDDLE_FRAG:
 532			if (!first_frag) {
 533				first_frag = pos;
 534				next_tsn = ctsn + 1;
 535				last_frag = pos;
 536			} else if (next_tsn == ctsn) {
 537				next_tsn++;
 538				last_frag = pos;
 539			} else
 540				goto done;
 541			break;
 542		case SCTP_DATA_LAST_FRAG:
 543			if (!first_frag)
 544				first_frag = pos;
 545			else if (ctsn != next_tsn)
 546				goto done;
 547			last_frag = pos;
 548			is_last = 1;
 549			goto done;
 550		default:
 551			return NULL;
 552		}
 553	}
 554
 555	/* We have the reassembled event. There is no need to look
 556	 * further.
 557	 */
 558done:
 559	retval = sctp_make_reassembled_event(ulpq->asoc->base.net, &ulpq->reasm,
 560					     first_frag, last_frag);
 561	if (retval && is_last)
 562		retval->msg_flags |= MSG_EOR;
 563
 564	return retval;
 565}
 566
 567
 568/* Helper function to reassemble chunks.  Hold chunks on the reasm queue that
 569 * need reassembling.
 570 */
 571static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
 572						struct sctp_ulpevent *event)
 573{
 574	struct sctp_ulpevent *retval = NULL;
 575
 576	/* Check if this is part of a fragmented message.  */
 577	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
 578		event->msg_flags |= MSG_EOR;
 579		return event;
 580	}
 581
 582	sctp_ulpq_store_reasm(ulpq, event);
 583	if (!ulpq->pd_mode)
 584		retval = sctp_ulpq_retrieve_reassembled(ulpq);
 585	else {
 586		__u32 ctsn, ctsnap;
 587
 588		/* Do not even bother unless this is the next tsn to
 589		 * be delivered.
 590		 */
 591		ctsn = event->tsn;
 592		ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
 593		if (TSN_lte(ctsn, ctsnap))
 594			retval = sctp_ulpq_retrieve_partial(ulpq);
 595	}
 596
 597	return retval;
 598}
 599
 600/* Retrieve the first part (sequential fragments) for partial delivery.  */
 601static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
 602{
 603	struct sk_buff *pos, *last_frag, *first_frag;
 604	struct sctp_ulpevent *cevent;
 605	__u32 ctsn, next_tsn;
 606	struct sctp_ulpevent *retval;
 607
 608	/* The chunks are held in the reasm queue sorted by TSN.
 609	 * Walk through the queue sequentially and look for a sequence of
 610	 * fragmented chunks that start a datagram.
 611	 */
 612
 613	if (skb_queue_empty(&ulpq->reasm))
 614		return NULL;
 615
 616	last_frag = first_frag = NULL;
 617	retval = NULL;
 618	next_tsn = 0;
 619
 620	skb_queue_walk(&ulpq->reasm, pos) {
 621		cevent = sctp_skb2event(pos);
 622		ctsn = cevent->tsn;
 623
 624		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
 625		case SCTP_DATA_FIRST_FRAG:
 626			if (!first_frag) {
 627				first_frag = pos;
 628				next_tsn = ctsn + 1;
 629				last_frag = pos;
 630			} else
 631				goto done;
 632			break;
 633
 634		case SCTP_DATA_MIDDLE_FRAG:
 635			if (!first_frag)
 636				return NULL;
 637			if (ctsn == next_tsn) {
 638				next_tsn++;
 639				last_frag = pos;
 640			} else
 641				goto done;
 642			break;
 643
 644		case SCTP_DATA_LAST_FRAG:
 645			if (!first_frag)
 646				return NULL;
 647			else
 648				goto done;
 649			break;
 650
 651		default:
 652			return NULL;
 653		}
 654	}
 655
 656	/* We have the reassembled event. There is no need to look
 657	 * further.
 658	 */
 659done:
 660	retval = sctp_make_reassembled_event(ulpq->asoc->base.net, &ulpq->reasm,
 661					     first_frag, last_frag);
 662	return retval;
 663}
 664
 665/*
 666 * Flush out stale fragments from the reassembly queue when processing
 667 * a Forward TSN.
 668 *
 669 * RFC 3758, Section 3.6
 670 *
 671 * After receiving and processing a FORWARD TSN, the data receiver MUST
 672 * take cautions in updating its re-assembly queue.  The receiver MUST
 673 * remove any partially reassembled message, which is still missing one
 674 * or more TSNs earlier than or equal to the new cumulative TSN point.
 675 * In the event that the receiver has invoked the partial delivery API,
 676 * a notification SHOULD also be generated to inform the upper layer API
 677 * that the message being partially delivered will NOT be completed.
 678 */
 679void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
 680{
 681	struct sk_buff *pos, *tmp;
 682	struct sctp_ulpevent *event;
 683	__u32 tsn;
 684
 685	if (skb_queue_empty(&ulpq->reasm))
 686		return;
 687
 688	skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
 689		event = sctp_skb2event(pos);
 690		tsn = event->tsn;
 691
 692		/* Since the entire message must be abandoned by the
 693		 * sender (item A3 in Section 3.5, RFC 3758), we can
  694		 * free all fragments on the list that are less than
 695		 * or equal to ctsn_point
 696		 */
 697		if (TSN_lte(tsn, fwd_tsn)) {
 698			__skb_unlink(pos, &ulpq->reasm);
 699			sctp_ulpevent_free(event);
 700		} else
 701			break;
 702	}
 703}
 704
 705/*
  706 * Drain the reassembly queue.  If we just cleared partial delivery, it
 707 * is possible that the reassembly queue will contain already reassembled
 708 * messages.  Retrieve any such messages and give them to the user.
 709 */
 710static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
 711{
  712	struct sctp_ulpevent *event = NULL;
 713
 714	if (skb_queue_empty(&ulpq->reasm))
 715		return;
 716
 717	while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
 718		struct sk_buff_head temp;
 719
 720		skb_queue_head_init(&temp);
 721		__skb_queue_tail(&temp, sctp_event2skb(event));
 722
 723		/* Do ordering if needed.  */
 724		if (event->msg_flags & MSG_EOR)
  725			event = sctp_ulpq_order(ulpq, event);
 726
 727		/* Send event to the ULP.  'event' is the
  728		 * sctp_ulpevent for the very first SKB on the 'temp' list.
 729		 */
 730		if (event)
 731			sctp_ulpq_tail_event(ulpq, &temp);
 732	}
 733}
 734
 735
 736/* Helper function to gather skbs that have possibly become
 737 * ordered by an incoming chunk.
 738 */
 739static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
 740					      struct sctp_ulpevent *event)
 741{
 742	struct sk_buff_head *event_list;
 743	struct sk_buff *pos, *tmp;
 744	struct sctp_ulpevent *cevent;
 745	struct sctp_stream *stream;
 746	__u16 sid, csid, cssn;
 747
 748	sid = event->stream;
 749	stream  = &ulpq->asoc->stream;
 750
 751	event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;
 752
 753	/* We are holding the chunks by stream, by SSN.  */
 754	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
 755		cevent = (struct sctp_ulpevent *) pos->cb;
 756		csid = cevent->stream;
 757		cssn = cevent->ssn;
 758
 759		/* Have we gone too far?  */
 760		if (csid > sid)
 761			break;
 762
 763		/* Have we not gone far enough?  */
 764		if (csid < sid)
 765			continue;
 766
 767		if (cssn != sctp_ssn_peek(stream, in, sid))
 768			break;
 769
 770		/* Found it, so mark in the stream. */
 771		sctp_ssn_next(stream, in, sid);
 772
 773		__skb_unlink(pos, &ulpq->lobby);
 774
 775		/* Attach all gathered skbs to the event.  */
 776		__skb_queue_tail(event_list, pos);
 777	}
 778}
 779
 780/* Helper function to store chunks needing ordering.  */
 781static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
 782					   struct sctp_ulpevent *event)
 783{
 784	struct sk_buff *pos;
 785	struct sctp_ulpevent *cevent;
 786	__u16 sid, csid;
 787	__u16 ssn, cssn;
 788
 789	pos = skb_peek_tail(&ulpq->lobby);
 790	if (!pos) {
 791		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
 792		return;
 793	}
 794
 795	sid = event->stream;
 796	ssn = event->ssn;
 797
 798	cevent = (struct sctp_ulpevent *) pos->cb;
 799	csid = cevent->stream;
 800	cssn = cevent->ssn;
 801	if (sid > csid) {
 802		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
 803		return;
 804	}
 805
 806	if ((sid == csid) && SSN_lt(cssn, ssn)) {
 807		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
 808		return;
 809	}
 810
 811	/* Find the right place in this list.  We store them by
 812	 * stream ID and then by SSN.
 813	 */
 814	skb_queue_walk(&ulpq->lobby, pos) {
 815		cevent = (struct sctp_ulpevent *) pos->cb;
 816		csid = cevent->stream;
 817		cssn = cevent->ssn;
 818
 819		if (csid > sid)
 820			break;
 821		if (csid == sid && SSN_lt(ssn, cssn))
 822			break;
 823	}
 824
 825
 826	/* Insert before pos. */
 827	__skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
 828}
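
/*
 * The lobby is thus kept sorted by stream id first and, within a
 * stream, by SSN compared in 16-bit serial-number space so the order
 * survives SSN wrap-around.  A minimal, standalone sketch of that
 * ordering predicate; the names below are illustrative only.
 */
#if 0	/* standalone userspace sketch */
#include <stdint.h>

/* Nonzero when 16-bit serial number 'a' precedes 'b'. */
static int example_ssn_lt(uint16_t a, uint16_t b)
{
	return ((uint16_t)(a - b) & 0x8000) != 0;
}

/* Nonzero when a new event (sid, ssn) belongs before a queued
 * event (csid, cssn), mirroring the walk in the function above.
 */
static int example_goes_before(uint16_t sid, uint16_t ssn,
			       uint16_t csid, uint16_t cssn)
{
	return csid > sid || (csid == sid && example_ssn_lt(ssn, cssn));
}
#endif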
 829
 830static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
 831					     struct sctp_ulpevent *event)
 832{
 833	__u16 sid, ssn;
 834	struct sctp_stream *stream;
 835
 836	/* Check if this message needs ordering.  */
 837	if (event->msg_flags & SCTP_DATA_UNORDERED)
 838		return event;
 839
 840	/* Note: The stream ID must be verified before this routine.  */
 841	sid = event->stream;
 842	ssn = event->ssn;
 843	stream  = &ulpq->asoc->stream;
 844
 845	/* Is this the expected SSN for this stream ID?  */
 846	if (ssn != sctp_ssn_peek(stream, in, sid)) {
 847		/* We've received something out of order, so find where it
 848		 * needs to be placed.  We order by stream and then by SSN.
 849		 */
 850		sctp_ulpq_store_ordered(ulpq, event);
 851		return NULL;
 852	}
 853
 854	/* Mark that the next chunk has been found.  */
 855	sctp_ssn_next(stream, in, sid);
 856
 857	/* Go find any other chunks that were waiting for
 858	 * ordering.
 859	 */
 860	sctp_ulpq_retrieve_ordered(ulpq, event);
 861
 862	return event;
 863}
 864
 865/* Helper function to gather skbs that have possibly become
  866 * deliverable in order because a Forward TSN skipped their dependencies.
 867 */
 868static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
 869{
 870	struct sk_buff *pos, *tmp;
 871	struct sctp_ulpevent *cevent;
 872	struct sctp_ulpevent *event;
 873	struct sctp_stream *stream;
 874	struct sk_buff_head temp;
 875	struct sk_buff_head *lobby = &ulpq->lobby;
 876	__u16 csid, cssn;
 877
 878	stream = &ulpq->asoc->stream;
 879
 880	/* We are holding the chunks by stream, by SSN.  */
 881	skb_queue_head_init(&temp);
 882	event = NULL;
 883	sctp_skb_for_each(pos, lobby, tmp) {
 884		cevent = (struct sctp_ulpevent *) pos->cb;
 885		csid = cevent->stream;
 886		cssn = cevent->ssn;
 887
 888		/* Have we gone too far?  */
 889		if (csid > sid)
 890			break;
 891
 892		/* Have we not gone far enough?  */
 893		if (csid < sid)
 894			continue;
 895
 896		/* see if this ssn has been marked by skipping */
 897		if (!SSN_lt(cssn, sctp_ssn_peek(stream, in, csid)))
 898			break;
 899
 900		__skb_unlink(pos, lobby);
 901		if (!event)
 902			/* Create a temporary list to collect chunks on.  */
 903			event = sctp_skb2event(pos);
 904
 905		/* Attach all gathered skbs to the event.  */
 906		__skb_queue_tail(&temp, pos);
 907	}
 908
 909	/* If we didn't reap any data, see if the next expected SSN
 910	 * is next on the queue and if so, use that.
 911	 */
 912	if (event == NULL && pos != (struct sk_buff *)lobby) {
 913		cevent = (struct sctp_ulpevent *) pos->cb;
 914		csid = cevent->stream;
 915		cssn = cevent->ssn;
 916
 917		if (csid == sid && cssn == sctp_ssn_peek(stream, in, csid)) {
 918			sctp_ssn_next(stream, in, csid);
 919			__skb_unlink(pos, lobby);
 920			__skb_queue_tail(&temp, pos);
 921			event = sctp_skb2event(pos);
 922		}
 923	}
 924
 925	/* Send event to the ULP.  'event' is the sctp_ulpevent for
  926 * the very first SKB on the 'temp' list.
 927	 */
 928	if (event) {
  929		/* see if we have more ordered data that we can deliver */
 930		sctp_ulpq_retrieve_ordered(ulpq, event);
 931		sctp_ulpq_tail_event(ulpq, &temp);
 932	}
 933}
 934
 935/* Skip over an SSN. This is used during the processing of
  936 * a Forward TSN chunk to skip over the abandoned ordered data.
 937 */
 938void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
 939{
 940	struct sctp_stream *stream;
 941
 942	/* Note: The stream ID must be verified before this routine.  */
 943	stream  = &ulpq->asoc->stream;
 944
 945	/* Is this an old SSN?  If so ignore. */
 946	if (SSN_lt(ssn, sctp_ssn_peek(stream, in, sid)))
 947		return;
 948
 949	/* Mark that we are no longer expecting this SSN or lower. */
 950	sctp_ssn_skip(stream, in, sid, ssn);
 951
 952	/* Go find any other chunks that were waiting for
 953	 * ordering and deliver them if needed.
 954	 */
 955	sctp_ulpq_reap_ordered(ulpq, sid);
 956}
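
/*
 * A worked example of the skip, assuming stream 3 currently expects
 * SSN 5 and a FORWARD TSN abandons that stream's data up to SSN 8:
 *
 *	before: next expected SSN for stream 3 is 5
 *	sctp_ulpq_skip(ulpq, 3, 8);
 *	after:  next expected SSN for stream 3 is 9
 *
 * Any events for stream 3 with SSN at or below 8 still waiting in the
 * lobby now compare below the expected SSN, so sctp_ulpq_reap_ordered()
 * unlinks and delivers them.
 */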
 957
 958__u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq, struct sk_buff_head *list,
 959			    __u16 needed)
 960{
 961	__u16 freed = 0;
 962	__u32 tsn, last_tsn;
 963	struct sk_buff *skb, *flist, *last;
 964	struct sctp_ulpevent *event;
 965	struct sctp_tsnmap *tsnmap;
 966
 967	tsnmap = &ulpq->asoc->peer.tsn_map;
 968
 969	while ((skb = skb_peek_tail(list)) != NULL) {
 970		event = sctp_skb2event(skb);
 971		tsn = event->tsn;
 972
 973		/* Don't renege below the Cumulative TSN ACK Point. */
 974		if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap)))
 975			break;
 976
 977		/* Events in ordering queue may have multiple fragments
 978		 * corresponding to additional TSNs.  Sum the total
 979		 * freed space; find the last TSN.
 980		 */
 981		freed += skb_headlen(skb);
 982		flist = skb_shinfo(skb)->frag_list;
 983		for (last = flist; flist; flist = flist->next) {
 984			last = flist;
 985			freed += skb_headlen(last);
 986		}
 987		if (last)
 988			last_tsn = sctp_skb2event(last)->tsn;
 989		else
 990			last_tsn = tsn;
 991
 992		/* Unlink the event, then renege all applicable TSNs. */
 993		__skb_unlink(skb, list);
 994		sctp_ulpevent_free(event);
 995		while (TSN_lte(tsn, last_tsn)) {
 996			sctp_tsnmap_renege(tsnmap, tsn);
 997			tsn++;
 998		}
 999		if (freed >= needed)
1000			return freed;
1001	}
1002
1003	return freed;
1004}
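
/*
 * A worked example of the accounting above, assuming the tail of the
 * list is a reassembled event whose head skb carries TSN 12 and whose
 * frag_list carries the fragments with TSNs 13 and 14:
 *
 *	freed    += skb_headlen() of all three skbs
 *	last_tsn  = 14
 *
 * so the event is unlinked once and TSNs 12 through 14 are all handed
 * back to sctp_tsnmap_renege().
 */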
1005
1006/* Renege 'needed' bytes from the ordering queue. */
1007static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
1008{
1009	return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
1010}
1011
1012/* Renege 'needed' bytes from the reassembly queue. */
1013static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
1014{
1015	return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed);
1016}
1017
 1018/* Partially deliver the first message, as there is pressure on the rwnd. */
1019void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
1020				gfp_t gfp)
1021{
1022	struct sctp_ulpevent *event;
1023	struct sctp_association *asoc;
1024	struct sctp_sock *sp;
1025	__u32 ctsn;
1026	struct sk_buff *skb;
1027
1028	asoc = ulpq->asoc;
1029	sp = sctp_sk(asoc->base.sk);
1030
1031	/* If the association is already in Partial Delivery mode
1032	 * we have nothing to do.
1033	 */
1034	if (ulpq->pd_mode)
1035		return;
1036
1037	/* Data must be at or below the Cumulative TSN ACK Point to
1038	 * start partial delivery.
1039	 */
1040	skb = skb_peek(&asoc->ulpq.reasm);
1041	if (skb != NULL) {
1042		ctsn = sctp_skb2event(skb)->tsn;
1043		if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
1044			return;
1045	}
1046
 1047	/* If the user enabled the fragment interleave socket option,
 1048	 * multiple associations can enter partial delivery.
 1049	 * Otherwise, we can only enter partial delivery if the
 1050	 * socket is not already in partial delivery mode.
1051	 */
1052	if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) {
1053		/* Is partial delivery possible?  */
1054		event = sctp_ulpq_retrieve_first(ulpq);
1055		/* Send event to the ULP.   */
1056		if (event) {
1057			struct sk_buff_head temp;
1058
1059			skb_queue_head_init(&temp);
1060			__skb_queue_tail(&temp, sctp_event2skb(event));
1061			sctp_ulpq_tail_event(ulpq, &temp);
1062			sctp_ulpq_set_pd(ulpq);
1063			return;
1064		}
1065	}
1066}
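
/*
 * The fragment interleave behaviour referred to above is controlled
 * from user space with the SCTP_FRAGMENT_INTERLEAVE socket option
 * (RFC 6458, Section 8.1.20).  A minimal sketch of how an application
 * might enable it; socket setup and error handling are omitted.
 */
#if 0	/* standalone userspace sketch */
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

static int example_enable_interleave(int sd)
{
	/* 0: no interleave, 1: interleave across associations on a
	 * one-to-many socket, 2: also interleave across streams.
	 */
	int level = 1;

	return setsockopt(sd, IPPROTO_SCTP, SCTP_FRAGMENT_INTERLEAVE,
			  &level, sizeof(level));
}
#endif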
1067
1068/* Renege some packets to make room for an incoming chunk.  */
1069void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
1070		      gfp_t gfp)
1071{
1072	struct sctp_association *asoc = ulpq->asoc;
1073	__u32 freed = 0;
1074	__u16 needed;
1075
1076	needed = ntohs(chunk->chunk_hdr->length) -
1077		 sizeof(struct sctp_data_chunk);
1078
1079	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
1080		freed = sctp_ulpq_renege_order(ulpq, needed);
1081		if (freed < needed)
1082			freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
1083	}
1084	/* If able to free enough room, accept this chunk. */
1085	if (sk_rmem_schedule(asoc->base.sk, chunk->skb, needed) &&
1086	    freed >= needed) {
1087		int retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
1088		/*
1089		 * Enter partial delivery if chunk has not been
1090		 * delivered; otherwise, drain the reassembly queue.
1091		 */
1092		if (retval <= 0)
1093			sctp_ulpq_partial_delivery(ulpq, gfp);
1094		else if (retval == 1)
1095			sctp_ulpq_reasm_drain(ulpq);
1096	}
1097}
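
/*
 * A worked example of the 'needed' computation above: the DATA chunk's
 * length field covers the 16-byte DATA chunk header (chunk header,
 * TSN, stream id, SSN and PPID) plus the user data, so for a 100-byte
 * payload
 *
 *	ntohs(chunk_hdr->length)                      = 16 + 100 = 116
 *	needed = 116 - sizeof(struct sctp_data_chunk) = 100
 *
 * and at least 100 bytes must be freed (and be schedulable as receive
 * memory) before the chunk is accepted.
 */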
1098
1099/* Notify the application if an association is aborted and in
1100 * partial delivery mode.  Send up any pending received messages.
1101 */
1102void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
1103{
1104	struct sctp_ulpevent *ev = NULL;
1105	struct sctp_sock *sp;
1106	struct sock *sk;
1107
1108	if (!ulpq->pd_mode)
1109		return;
1110
1111	sk = ulpq->asoc->base.sk;
1112	sp = sctp_sk(sk);
1113	if (sctp_ulpevent_type_enabled(ulpq->asoc->subscribe,
1114				       SCTP_PARTIAL_DELIVERY_EVENT))
1115		ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
1116					      SCTP_PARTIAL_DELIVERY_ABORTED,
1117					      0, 0, 0, gfp);
1118	if (ev)
1119		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));
1120
1121	/* If there is data waiting, send it up the socket now. */
1122	if ((sctp_ulpq_clear_pd(ulpq) || ev) && !sp->data_ready_signalled) {
1123		sp->data_ready_signalled = 1;
1124		sk->sk_data_ready(sk);
1125	}
1126}
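
/*
 * From user space, the notification queued above shows up as an
 * SCTP_PARTIAL_DELIVERY_EVENT whose pdapi_indication is
 * SCTP_PARTIAL_DELIVERY_ABORTED (RFC 6458, Section 6.1.7), provided
 * the application subscribed to partial delivery events.  A minimal
 * sketch of handling it after recvmsg() returned MSG_NOTIFICATION;
 * subscription setup and error handling are omitted.
 */
#if 0	/* standalone userspace sketch */
#include <stdio.h>
#include <netinet/sctp.h>

static void example_handle_notification(const union sctp_notification *sn)
{
	if (sn->sn_header.sn_type != SCTP_PARTIAL_DELIVERY_EVENT)
		return;

	if (sn->sn_pdapi_event.pdapi_indication ==
	    SCTP_PARTIAL_DELIVERY_ABORTED)
		fprintf(stderr, "partial delivery aborted on assoc %d\n",
			(int)sn->sn_pdapi_event.pdapi_assoc_id);
}
#endif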