v3.5.6
   1/* SCTP kernel implementation
   2 * (C) Copyright IBM Corp. 2001, 2004
   3 * Copyright (c) 1999-2000 Cisco, Inc.
   4 * Copyright (c) 1999-2001 Motorola, Inc.
   5 * Copyright (c) 2001 Intel Corp.
   6 * Copyright (c) 2001 Nokia, Inc.
   7 * Copyright (c) 2001 La Monte H.P. Yarroll
   8 *
   9 * This abstraction carries sctp events to the ULP (sockets).
  10 *
  11 * This SCTP implementation is free software;
  12 * you can redistribute it and/or modify it under the terms of
  13 * the GNU General Public License as published by
  14 * the Free Software Foundation; either version 2, or (at your option)
  15 * any later version.
  16 *
  17 * This SCTP implementation is distributed in the hope that it
  18 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
  19 *                 ************************
  20 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
  21 * See the GNU General Public License for more details.
  22 *
  23 * You should have received a copy of the GNU General Public License
  24 * along with GNU CC; see the file COPYING.  If not, write to
  25 * the Free Software Foundation, 59 Temple Place - Suite 330,
  26 * Boston, MA 02111-1307, USA.
  27 *
  28 * Please send any bug reports or fixes you make to the
  29 * email address(es):
  30 *    lksctp developers <lksctp-developers@lists.sourceforge.net>
  31 *
  32 * Or submit a bug report through the following website:
  33 *    http://www.sf.net/projects/lksctp
  34 *
  35 * Written or modified by:
  36 *    Jon Grimm             <jgrimm@us.ibm.com>
  37 *    La Monte H.P. Yarroll <piggy@acm.org>
  38 *    Sridhar Samudrala     <sri@us.ibm.com>
  39 *
   40 * Any bugs reported to us we will try to fix... any fixes shared will
  41 * be incorporated into the next SCTP release.
  42 */
  43
  44#include <linux/slab.h>
  45#include <linux/types.h>
  46#include <linux/skbuff.h>
  47#include <net/sock.h>
  48#include <net/sctp/structs.h>
  49#include <net/sctp/sctp.h>
  50#include <net/sctp/sm.h>
  51
  52/* Forward declarations for internal helpers.  */
  53static struct sctp_ulpevent * sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
  54					      struct sctp_ulpevent *);
  55static struct sctp_ulpevent * sctp_ulpq_order(struct sctp_ulpq *,
  56					      struct sctp_ulpevent *);
  57static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);
  58
  59/* 1st Level Abstractions */
  60
  61/* Initialize a ULP queue from a block of memory.  */
  62struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
  63				 struct sctp_association *asoc)
  64{
  65	memset(ulpq, 0, sizeof(struct sctp_ulpq));
  66
  67	ulpq->asoc = asoc;
  68	skb_queue_head_init(&ulpq->reasm);
  69	skb_queue_head_init(&ulpq->lobby);
  70	ulpq->pd_mode  = 0;
  71	ulpq->malloced = 0;
  72
  73	return ulpq;
  74}
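/* Usage sketch (not from this file): the ULP queue is embedded in the
 * association, so there is nothing to allocate separately.  'my_asoc'
 * and 'example_ulpq_setup' are hypothetical names for illustration.
 */
static void example_ulpq_setup(struct sctp_association *my_asoc)
{
	/* Initialize the embedded queue in place. */
	sctp_ulpq_init(&my_asoc->ulpq, my_asoc);
}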
  75
  76
  77/* Flush the reassembly and ordering queues.  */
  78void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
  79{
  80	struct sk_buff *skb;
  81	struct sctp_ulpevent *event;
  82
  83	while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
  84		event = sctp_skb2event(skb);
  85		sctp_ulpevent_free(event);
  86	}
  87
  88	while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
  89		event = sctp_skb2event(skb);
  90		sctp_ulpevent_free(event);
  91	}
  92
  93}
  94
  95/* Dispose of a ulpqueue.  */
  96void sctp_ulpq_free(struct sctp_ulpq *ulpq)
  97{
  98	sctp_ulpq_flush(ulpq);
  99	if (ulpq->malloced)
 100		kfree(ulpq);
 101}
 102
 103/* Process an incoming DATA chunk.  */
 104int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
 105			gfp_t gfp)
 106{
 107	struct sk_buff_head temp;
 108	struct sctp_ulpevent *event;
 109
 110	/* Create an event from the incoming chunk. */
 111	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
 112	if (!event)
 113		return -ENOMEM;
 114
 115	/* Do reassembly if needed.  */
 116	event = sctp_ulpq_reasm(ulpq, event);
 117
 118	/* Do ordering if needed.  */
 119	if ((event) && (event->msg_flags & MSG_EOR)){
 120		/* Create a temporary list to collect chunks on.  */
 121		skb_queue_head_init(&temp);
 122		__skb_queue_tail(&temp, sctp_event2skb(event));
 123
 124		event = sctp_ulpq_order(ulpq, event);
 125	}
 126
 127	/* Send event to the ULP.  'event' is the sctp_ulpevent for
  128	 * the very first SKB on the 'temp' list.
 129	 */
 130	if (event)
 131		sctp_ulpq_tail_event(ulpq, event);
 132
 133	return 0;
 134}
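/* Hedged caller sketch (hypothetical helper): the state machine hands
 * each accepted DATA chunk to the ULP queue roughly like this, with
 * GFP_ATOMIC because delivery runs in softirq context.
 */
static void example_deliver_data(struct sctp_association *asoc,
				 struct sctp_chunk *chunk)
{
	if (sctp_ulpq_tail_data(&asoc->ulpq, chunk, GFP_ATOMIC) < 0)
		pr_debug("%s: no memory, event dropped\n", __func__);
}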
 135
 136/* Add a new event for propagation to the ULP.  */
 137/* Clear the partial delivery mode for this socket.   Note: This
 138 * assumes that no association is currently in partial delivery mode.
 139 */
 140int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
 141{
 142	struct sctp_sock *sp = sctp_sk(sk);
 143
 144	if (atomic_dec_and_test(&sp->pd_mode)) {
 145		/* This means there are no other associations in PD, so
 146		 * we can go ahead and clear out the lobby in one shot
 147		 */
 148		if (!skb_queue_empty(&sp->pd_lobby)) {
 149			struct list_head *list;
 150			sctp_skb_list_tail(&sp->pd_lobby, &sk->sk_receive_queue);
 151			list = (struct list_head *)&sctp_sk(sk)->pd_lobby;
 152			INIT_LIST_HEAD(list);
 153			return 1;
 154		}
 155	} else {
 156		/* There are other associations in PD, so we only need to
 157		 * pull stuff out of the lobby that belongs to the
  158	 * association that is exiting PD (all of its notifications
 159		 * are posted here).
 160		 */
 161		if (!skb_queue_empty(&sp->pd_lobby) && asoc) {
 162			struct sk_buff *skb, *tmp;
 163			struct sctp_ulpevent *event;
 164
 165			sctp_skb_for_each(skb, &sp->pd_lobby, tmp) {
 166				event = sctp_skb2event(skb);
 167				if (event->asoc == asoc) {
 168					__skb_unlink(skb, &sp->pd_lobby);
 169					__skb_queue_tail(&sk->sk_receive_queue,
 170							 skb);
 171				}
 172			}
 173		}
 174	}
 175
 176	return 0;
 177}
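/* Note on the counting above: sp->pd_mode is effectively a count of
 * associations currently in partial delivery on this socket:
 *
 *   sctp_ulpq_set_pd(ulpq)    ->  atomic_inc(&sp->pd_mode)
 *   sctp_ulpq_clear_pd(ulpq)  ->  atomic_dec_and_test(&sp->pd_mode)
 *
 * Only the final decrement (the count hitting zero) splices the whole
 * pd_lobby into sk_receive_queue; otherwise only the exiting
 * association's events are moved.
 */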
 178
 179/* Set the pd_mode on the socket and ulpq */
 180static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)
 181{
 182	struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk);
 183
 184	atomic_inc(&sp->pd_mode);
 185	ulpq->pd_mode = 1;
 186}
 187
 188/* Clear the pd_mode and restart any pending messages waiting for delivery. */
 189static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
 190{
 191	ulpq->pd_mode = 0;
 192	sctp_ulpq_reasm_drain(ulpq);
 193	return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
 194}
 195
 196/* If the SKB of 'event' is on a list, it is the first such member
 197 * of that list.
 198 */
 199int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
 200{
 201	struct sock *sk = ulpq->asoc->base.sk;
 202	struct sk_buff_head *queue, *skb_list;
 203	struct sk_buff *skb = sctp_event2skb(event);
 204	int clear_pd = 0;
 205
 206	skb_list = (struct sk_buff_head *) skb->prev;
 207
 208	/* If the socket is just going to throw this away, do not
 209	 * even try to deliver it.
 210	 */
 211	if (sock_flag(sk, SOCK_DEAD) || (sk->sk_shutdown & RCV_SHUTDOWN))
 212		goto out_free;
 213
 214	/* Check if the user wishes to receive this event.  */
 215	if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe))
 216		goto out_free;
 217
 218	/* If we are in partial delivery mode, post to the lobby until
  219	 * partial delivery is cleared, unless, of course, _this_
  220	 * association is the cause of the partial delivery.
 221	 */
 222
 223	if (atomic_read(&sctp_sk(sk)->pd_mode) == 0) {
 224		queue = &sk->sk_receive_queue;
 225	} else {
 226		if (ulpq->pd_mode) {
 227			/* If the association is in partial delivery, we
 228			 * need to finish delivering the partially processed
 229			 * packet before passing any other data.  This is
 230			 * because we don't truly support stream interleaving.
 231			 */
 232			if ((event->msg_flags & MSG_NOTIFICATION) ||
 233			    (SCTP_DATA_NOT_FRAG ==
 234				    (event->msg_flags & SCTP_DATA_FRAG_MASK)))
 235				queue = &sctp_sk(sk)->pd_lobby;
 236			else {
 237				clear_pd = event->msg_flags & MSG_EOR;
 238				queue = &sk->sk_receive_queue;
 239			}
 240		} else {
 241			/*
 242			 * If fragment interleave is enabled, we
 243			 * can queue this to the receive queue instead
 244			 * of the lobby.
 245			 */
 246			if (sctp_sk(sk)->frag_interleave)
 247				queue = &sk->sk_receive_queue;
 248			else
 249				queue = &sctp_sk(sk)->pd_lobby;
 250		}
 251	}
 252
 253	/* If we are harvesting multiple skbs they will be
 254	 * collected on a list.
 255	 */
 256	if (skb_list)
 257		sctp_skb_list_tail(skb_list, queue);
 258	else
 259		__skb_queue_tail(queue, skb);
 260
 261	/* Did we just complete partial delivery and need to get
 262	 * rolling again?  Move pending data to the receive
 263	 * queue.
 264	 */
 265	if (clear_pd)
 266		sctp_ulpq_clear_pd(ulpq);
 267
 268	if (queue == &sk->sk_receive_queue)
 269		sk->sk_data_ready(sk, 0);
 270	return 1;
 271
 272out_free:
 273	if (skb_list)
 274		sctp_queue_purge_ulpevents(skb_list);
 275	else
 276		sctp_ulpevent_free(event);
 277
 278	return 0;
 279}
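/* Hedged userspace sketch (lksctp API, error handling elided): the
 * subscription consulted by sctp_ulpevent_is_enabled() above is set
 * with the SCTP_EVENTS socket option.  'fd' is a hypothetical SCTP
 * socket; assumes <netinet/sctp.h> and <string.h>.
 */
static void example_subscribe(int fd)
{
	struct sctp_event_subscribe ev;

	memset(&ev, 0, sizeof(ev));
	ev.sctp_data_io_event = 1;	/* SCTP_SNDRCV ancillary data */
	ev.sctp_association_event = 1;	/* SCTP_ASSOC_CHANGE events */
	setsockopt(fd, IPPROTO_SCTP, SCTP_EVENTS, &ev, sizeof(ev));
}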
 280
 281/* 2nd Level Abstractions */
 282
 283/* Helper function to store chunks that need to be reassembled.  */
 284static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
 285					 struct sctp_ulpevent *event)
 286{
 287	struct sk_buff *pos;
 288	struct sctp_ulpevent *cevent;
 289	__u32 tsn, ctsn;
 290
 291	tsn = event->tsn;
 292
 293	/* See if it belongs at the end. */
 294	pos = skb_peek_tail(&ulpq->reasm);
 295	if (!pos) {
 296		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
 297		return;
 298	}
 299
 300	/* Short circuit just dropping it at the end. */
 301	cevent = sctp_skb2event(pos);
 302	ctsn = cevent->tsn;
 303	if (TSN_lt(ctsn, tsn)) {
 304		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
 305		return;
 306	}
 307
 308	/* Find the right place in this list. We store them by TSN.  */
 309	skb_queue_walk(&ulpq->reasm, pos) {
 310		cevent = sctp_skb2event(pos);
 311		ctsn = cevent->tsn;
 312
 313		if (TSN_lt(tsn, ctsn))
 314			break;
 315	}
 316
 317	/* Insert before pos. */
 318	__skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
 319
 320}
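/* Worked example: TSN_lt() uses serial-number arithmetic, so the
 * sorted insert above stays correct across the 32-bit TSN wrap:
 *
 *   TSN_lt(0xfffffffe, 0xffffffff) -> true
 *   TSN_lt(0xffffffff, 0x00000001) -> true   (wrapped)
 *   TSN_lt(0x00000001, 0xffffffff) -> false
 */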
 321
 322/* Helper function to return an event corresponding to the reassembled
 323 * datagram.
 324 * This routine creates a re-assembled skb given the first and last skb's
 325 * as stored in the reassembly queue. The skb's may be non-linear if the sctp
 326 * payload was fragmented on the way and ip had to reassemble them.
 327 * We add the rest of skb's to the first skb's fraglist.
 328 */
 329static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *queue, struct sk_buff *f_frag, struct sk_buff *l_frag)
 330{
 331	struct sk_buff *pos;
 332	struct sk_buff *new = NULL;
 333	struct sctp_ulpevent *event;
 334	struct sk_buff *pnext, *last;
 335	struct sk_buff *list = skb_shinfo(f_frag)->frag_list;
 336
 337	/* Store the pointer to the 2nd skb */
 338	if (f_frag == l_frag)
 339		pos = NULL;
 340	else
 341		pos = f_frag->next;
 342
 343	/* Get the last skb in the f_frag's frag_list if present. */
 344	for (last = list; list; last = list, list = list->next);
 345
  346	/* Add the list of remaining fragments to the first fragment's
 347	 * frag_list.
 348	 */
 349	if (last)
 350		last->next = pos;
 351	else {
 352		if (skb_cloned(f_frag)) {
 353			/* This is a cloned skb, we can't just modify
 354			 * the frag_list.  We need a new skb to do that.
 355			 * Instead of calling skb_unshare(), we'll do it
 356			 * ourselves since we need to delay the free.
 357			 */
 358			new = skb_copy(f_frag, GFP_ATOMIC);
 359			if (!new)
 360				return NULL;	/* try again later */
 361
 362			sctp_skb_set_owner_r(new, f_frag->sk);
 363
 364			skb_shinfo(new)->frag_list = pos;
 365		} else
 366			skb_shinfo(f_frag)->frag_list = pos;
 367	}
 368
 369	/* Remove the first fragment from the reassembly queue.  */
 370	__skb_unlink(f_frag, queue);
 371
 372	/* if we did unshare, then free the old skb and re-assign */
 373	if (new) {
 374		kfree_skb(f_frag);
 375		f_frag = new;
 376	}
 377
 378	while (pos) {
 379
 380		pnext = pos->next;
 381
 382		/* Update the len and data_len fields of the first fragment. */
 383		f_frag->len += pos->len;
 384		f_frag->data_len += pos->len;
 385
 386		/* Remove the fragment from the reassembly queue.  */
 387		__skb_unlink(pos, queue);
 388
 389		/* Break if we have reached the last fragment.  */
 390		if (pos == l_frag)
 391			break;
 392		pos->next = pnext;
 393		pos = pnext;
 394	}
 395
 396	event = sctp_skb2event(f_frag);
 397	SCTP_INC_STATS(SCTP_MIB_REASMUSRMSGS);
 398
 399	return event;
 400}
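/* Resulting skb geometry (sketch): the returned event is backed by the
 * first fragment's skb; every later fragment hangs off its frag_list,
 * and len/data_len have been grown to span the whole message:
 *
 *   f_frag [fragment 1 data]      f_frag->len == sum of all fragments
 *     `-> frag_list: frag 2 -> frag 3 -> ... -> l_frag
 */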
 401
 402
 403/* Helper function to check if an incoming chunk has filled up the last
 404 * missing fragment in a SCTP datagram and return the corresponding event.
 405 */
 406static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
 407{
 408	struct sk_buff *pos;
 409	struct sctp_ulpevent *cevent;
 410	struct sk_buff *first_frag = NULL;
 411	__u32 ctsn, next_tsn;
 412	struct sctp_ulpevent *retval = NULL;
 413	struct sk_buff *pd_first = NULL;
 414	struct sk_buff *pd_last = NULL;
 415	size_t pd_len = 0;
 416	struct sctp_association *asoc;
 417	u32 pd_point;
 418
 419	/* Initialized to 0 just to avoid compiler warning message.  Will
 420	 * never be used with this value. It is referenced only after it
 421	 * is set when we find the first fragment of a message.
 422	 */
 423	next_tsn = 0;
 424
 425	/* The chunks are held in the reasm queue sorted by TSN.
 426	 * Walk through the queue sequentially and look for a sequence of
 427	 * fragmented chunks that complete a datagram.
 428	 * 'first_frag' and next_tsn are reset when we find a chunk which
 429	 * is the first fragment of a datagram. Once these 2 fields are set
 430	 * we expect to find the remaining middle fragments and the last
 431	 * fragment in order. If not, first_frag is reset to NULL and we
 432	 * start the next pass when we find another first fragment.
 433	 *
 434	 * There is a potential to do partial delivery if user sets
  435	 * SCTP_PARTIAL_DELIVERY_POINT option. Let's count some things here
  436	 * to see if we can do PD.
 437	 */
 438	skb_queue_walk(&ulpq->reasm, pos) {
 439		cevent = sctp_skb2event(pos);
 440		ctsn = cevent->tsn;
 441
 442		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
 443		case SCTP_DATA_FIRST_FRAG:
 444			/* If this "FIRST_FRAG" is the first
 445			 * element in the queue, then count it towards
 446			 * possible PD.
 447			 */
 448			if (pos == ulpq->reasm.next) {
 449			    pd_first = pos;
 450			    pd_last = pos;
 451			    pd_len = pos->len;
 452			} else {
 453			    pd_first = NULL;
 454			    pd_last = NULL;
 455			    pd_len = 0;
 456			}
 457
 458			first_frag = pos;
 459			next_tsn = ctsn + 1;
 460			break;
 461
 462		case SCTP_DATA_MIDDLE_FRAG:
 463			if ((first_frag) && (ctsn == next_tsn)) {
 464				next_tsn++;
 465				if (pd_first) {
 466				    pd_last = pos;
 467				    pd_len += pos->len;
 468				}
 469			} else
 470				first_frag = NULL;
 471			break;
 472
 473		case SCTP_DATA_LAST_FRAG:
 474			if (first_frag && (ctsn == next_tsn))
 475				goto found;
 476			else
 477				first_frag = NULL;
 478			break;
 479		}
 480	}
 481
 482	asoc = ulpq->asoc;
 483	if (pd_first) {
  484		/* Make sure we can enter partial delivery.
  485		 * We can trigger partial delivery only if fragment
  486		 * interleave is set, or the socket is not already
  487		 * in partial delivery.
 488		 */
 489		if (!sctp_sk(asoc->base.sk)->frag_interleave &&
 490		    atomic_read(&sctp_sk(asoc->base.sk)->pd_mode))
 491			goto done;
 492
 493		cevent = sctp_skb2event(pd_first);
 494		pd_point = sctp_sk(asoc->base.sk)->pd_point;
 495		if (pd_point && pd_point <= pd_len) {
 496			retval = sctp_make_reassembled_event(&ulpq->reasm,
 497							     pd_first,
 498							     pd_last);
 499			if (retval)
 500				sctp_ulpq_set_pd(ulpq);
 501		}
 502	}
 503done:
 504	return retval;
 505found:
 506	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, pos);
 507	if (retval)
 508		retval->msg_flags |= MSG_EOR;
 509	goto done;
 510}
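/* Hedged userspace sketch: the pd_point consulted above comes from the
 * SCTP_PARTIAL_DELIVERY_POINT socket option.  'fd' is a hypothetical
 * SCTP socket; assumes <netinet/sctp.h>.
 */
static void example_set_pd_point(int fd)
{
	__u32 pd_point = 4096;	/* begin partial delivery at 4 KiB */

	setsockopt(fd, IPPROTO_SCTP, SCTP_PARTIAL_DELIVERY_POINT,
		   &pd_point, sizeof(pd_point));
}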
 511
 512/* Retrieve the next set of fragments of a partial message. */
 513static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
 514{
 515	struct sk_buff *pos, *last_frag, *first_frag;
 516	struct sctp_ulpevent *cevent;
 517	__u32 ctsn, next_tsn;
 518	int is_last;
 519	struct sctp_ulpevent *retval;
 520
 521	/* The chunks are held in the reasm queue sorted by TSN.
 522	 * Walk through the queue sequentially and look for the first
 523	 * sequence of fragmented chunks.
 524	 */
 525
 526	if (skb_queue_empty(&ulpq->reasm))
 527		return NULL;
 528
 529	last_frag = first_frag = NULL;
 530	retval = NULL;
 531	next_tsn = 0;
 532	is_last = 0;
 533
 534	skb_queue_walk(&ulpq->reasm, pos) {
 535		cevent = sctp_skb2event(pos);
 536		ctsn = cevent->tsn;
 537
 538		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
 539		case SCTP_DATA_MIDDLE_FRAG:
 540			if (!first_frag) {
 541				first_frag = pos;
 542				next_tsn = ctsn + 1;
 543				last_frag = pos;
 544			} else if (next_tsn == ctsn)
 545				next_tsn++;
 546			else
 547				goto done;
 548			break;
 549		case SCTP_DATA_LAST_FRAG:
 550			if (!first_frag)
 551				first_frag = pos;
 552			else if (ctsn != next_tsn)
 553				goto done;
 554			last_frag = pos;
 555			is_last = 1;
 556			goto done;
 557		default:
 558			return NULL;
 559		}
 560	}
 561
 562	/* We have the reassembled event. There is no need to look
 563	 * further.
 564	 */
 565done:
 566	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
 567	if (retval && is_last)
 568		retval->msg_flags |= MSG_EOR;
 569
 570	return retval;
 571}
 572
 573
 574/* Helper function to reassemble chunks.  Hold chunks on the reasm queue that
 575 * need reassembling.
 576 */
 577static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
 578						struct sctp_ulpevent *event)
 579{
 580	struct sctp_ulpevent *retval = NULL;
 581
 582	/* Check if this is part of a fragmented message.  */
 583	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
 584		event->msg_flags |= MSG_EOR;
 585		return event;
 586	}
 587
 588	sctp_ulpq_store_reasm(ulpq, event);
 589	if (!ulpq->pd_mode)
 590		retval = sctp_ulpq_retrieve_reassembled(ulpq);
 591	else {
 592		__u32 ctsn, ctsnap;
 593
 594		/* Do not even bother unless this is the next tsn to
 595		 * be delivered.
 596		 */
 597		ctsn = event->tsn;
 598		ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
 599		if (TSN_lte(ctsn, ctsnap))
 600			retval = sctp_ulpq_retrieve_partial(ulpq);
 601	}
 602
 603	return retval;
 604}
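/* For reference, the SCTP_DATA_*_FRAG values tested throughout this
 * file map onto the DATA chunk's B (beginning) and E (ending) flag
 * bits:
 *
 *   B=1 E=1  SCTP_DATA_NOT_FRAG     (self-contained message)
 *   B=1 E=0  SCTP_DATA_FIRST_FRAG
 *   B=0 E=0  SCTP_DATA_MIDDLE_FRAG
 *   B=0 E=1  SCTP_DATA_LAST_FRAG
 */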
 605
 606/* Retrieve the first part (sequential fragments) for partial delivery.  */
 607static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
 608{
 609	struct sk_buff *pos, *last_frag, *first_frag;
 610	struct sctp_ulpevent *cevent;
 611	__u32 ctsn, next_tsn;
 612	struct sctp_ulpevent *retval;
 613
 614	/* The chunks are held in the reasm queue sorted by TSN.
 615	 * Walk through the queue sequentially and look for a sequence of
 616	 * fragmented chunks that start a datagram.
 617	 */
 618
 619	if (skb_queue_empty(&ulpq->reasm))
 620		return NULL;
 621
 622	last_frag = first_frag = NULL;
 623	retval = NULL;
 624	next_tsn = 0;
 625
 626	skb_queue_walk(&ulpq->reasm, pos) {
 627		cevent = sctp_skb2event(pos);
 628		ctsn = cevent->tsn;
 629
 630		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
 631		case SCTP_DATA_FIRST_FRAG:
 632			if (!first_frag) {
 633				first_frag = pos;
 634				next_tsn = ctsn + 1;
 635				last_frag = pos;
 636			} else
 637				goto done;
 638			break;
 639
 640		case SCTP_DATA_MIDDLE_FRAG:
 641			if (!first_frag)
 642				return NULL;
 643			if (ctsn == next_tsn) {
 644				next_tsn++;
 645				last_frag = pos;
 646			} else
 647				goto done;
 648			break;
 649		default:
 650			return NULL;
 651		}
 652	}
 653
 654	/* We have the reassembled event. There is no need to look
 655	 * further.
 656	 */
 657done:
 658	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
 659	return retval;
 660}
 661
 662/*
 663 * Flush out stale fragments from the reassembly queue when processing
 664 * a Forward TSN.
 665 *
 666 * RFC 3758, Section 3.6
 667 *
 668 * After receiving and processing a FORWARD TSN, the data receiver MUST
 669 * take cautions in updating its re-assembly queue.  The receiver MUST
 670 * remove any partially reassembled message, which is still missing one
 671 * or more TSNs earlier than or equal to the new cumulative TSN point.
 672 * In the event that the receiver has invoked the partial delivery API,
 673 * a notification SHOULD also be generated to inform the upper layer API
 674 * that the message being partially delivered will NOT be completed.
 675 */
 676void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
 677{
 678	struct sk_buff *pos, *tmp;
 679	struct sctp_ulpevent *event;
 680	__u32 tsn;
 681
 682	if (skb_queue_empty(&ulpq->reasm))
 683		return;
 684
 685	skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
 686		event = sctp_skb2event(pos);
 687		tsn = event->tsn;
 688
 689		/* Since the entire message must be abandoned by the
 690		 * sender (item A3 in Section 3.5, RFC 3758), we can
  691		 * free all fragments on the list that are less than
 692		 * or equal to ctsn_point
 693		 */
 694		if (TSN_lte(tsn, fwd_tsn)) {
 695			__skb_unlink(pos, &ulpq->reasm);
 696			sctp_ulpevent_free(event);
 697		} else
 698			break;
 699	}
 700}
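/* Worked example: if the reasm queue holds fragments with TSNs
 * {10, 11, 13, 14} and a FORWARD TSN advances the cumulative point to
 * 12, then 10 and 11 are unlinked and freed, and the walk breaks at
 * 13, leaving the still-live later fragments queued.
 */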
 701
 702/*
  703 * Drain the reassembly queue.  If we just cleared partial delivery, it
 704 * is possible that the reassembly queue will contain already reassembled
 705 * messages.  Retrieve any such messages and give them to the user.
 706 */
 707static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
 708{
 709	struct sctp_ulpevent *event = NULL;
 710	struct sk_buff_head temp;
 711
 712	if (skb_queue_empty(&ulpq->reasm))
 713		return;
 714
 715	while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
 716		/* Do ordering if needed.  */
 717		if ((event) && (event->msg_flags & MSG_EOR)){
 718			skb_queue_head_init(&temp);
 719			__skb_queue_tail(&temp, sctp_event2skb(event));
 720
 721			event = sctp_ulpq_order(ulpq, event);
 722		}
 723
 724		/* Send event to the ULP.  'event' is the
  725	 * sctp_ulpevent for the very first SKB on the 'temp' list.
 726		 */
 727		if (event)
 728			sctp_ulpq_tail_event(ulpq, event);
 729	}
 730}
 731
 732
 733/* Helper function to gather skbs that have possibly become
  734 * ordered by an incoming chunk.
 735 */
 736static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
 737					      struct sctp_ulpevent *event)
 738{
 739	struct sk_buff_head *event_list;
 740	struct sk_buff *pos, *tmp;
 741	struct sctp_ulpevent *cevent;
 742	struct sctp_stream *in;
 743	__u16 sid, csid, cssn;
 744
 745	sid = event->stream;
 746	in  = &ulpq->asoc->ssnmap->in;
 747
 748	event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;
 749
 750	/* We are holding the chunks by stream, by SSN.  */
 751	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
 752		cevent = (struct sctp_ulpevent *) pos->cb;
 753		csid = cevent->stream;
 754		cssn = cevent->ssn;
 755
 756		/* Have we gone too far?  */
 757		if (csid > sid)
 758			break;
 759
 760		/* Have we not gone far enough?  */
 761		if (csid < sid)
 762			continue;
 763
 764		if (cssn != sctp_ssn_peek(in, sid))
 765			break;
 766
 767		/* Found it, so mark in the ssnmap. */
 768		sctp_ssn_next(in, sid);
 769
 770		__skb_unlink(pos, &ulpq->lobby);
 771
 772		/* Attach all gathered skbs to the event.  */
 773		__skb_queue_tail(event_list, pos);
 774	}
 775}
 776
 777/* Helper function to store chunks needing ordering.  */
 778static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
 779					   struct sctp_ulpevent *event)
 780{
 781	struct sk_buff *pos;
 782	struct sctp_ulpevent *cevent;
 783	__u16 sid, csid;
 784	__u16 ssn, cssn;
 785
 786	pos = skb_peek_tail(&ulpq->lobby);
 787	if (!pos) {
 788		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
 789		return;
 790	}
 791
 792	sid = event->stream;
 793	ssn = event->ssn;
 794
 795	cevent = (struct sctp_ulpevent *) pos->cb;
 796	csid = cevent->stream;
 797	cssn = cevent->ssn;
 798	if (sid > csid) {
 799		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
 800		return;
 801	}
 802
 803	if ((sid == csid) && SSN_lt(cssn, ssn)) {
 804		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
 805		return;
 806	}
 807
 808	/* Find the right place in this list.  We store them by
 809	 * stream ID and then by SSN.
 810	 */
 811	skb_queue_walk(&ulpq->lobby, pos) {
 812		cevent = (struct sctp_ulpevent *) pos->cb;
 813		csid = cevent->stream;
 814		cssn = cevent->ssn;
 815
 816		if (csid > sid)
 817			break;
 818		if (csid == sid && SSN_lt(ssn, cssn))
 819			break;
 820	}
 821
 822
 823	/* Insert before pos. */
 824	__skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
 825}
 826
 827static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
 828					     struct sctp_ulpevent *event)
 829{
 830	__u16 sid, ssn;
 831	struct sctp_stream *in;
 832
 833	/* Check if this message needs ordering.  */
 834	if (SCTP_DATA_UNORDERED & event->msg_flags)
 835		return event;
 836
 837	/* Note: The stream ID must be verified before this routine.  */
 838	sid = event->stream;
 839	ssn = event->ssn;
 840	in  = &ulpq->asoc->ssnmap->in;
 841
 842	/* Is this the expected SSN for this stream ID?  */
 843	if (ssn != sctp_ssn_peek(in, sid)) {
 844		/* We've received something out of order, so find where it
 845		 * needs to be placed.  We order by stream and then by SSN.
 846		 */
 847		sctp_ulpq_store_ordered(ulpq, event);
 848		return NULL;
 849	}
 850
 851	/* Mark that the next chunk has been found.  */
 852	sctp_ssn_next(in, sid);
 853
 854	/* Go find any other chunks that were waiting for
 855	 * ordering.
 856	 */
 857	sctp_ulpq_retrieve_ordered(ulpq, event);
 858
 859	return event;
 860}
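/* Ordering example: suppose stream 2 expects SSN 5.  An event with
 * (sid 2, ssn 6) is parked in the lobby and NULL is returned; when
 * (sid 2, ssn 5) later arrives it is returned for delivery,
 * sctp_ssn_next() advances the expectation to 6, and the retrieve pass
 * immediately pulls ssn 6 back out of the lobby as well.
 */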
 861
 862/* Helper function to gather skbs that have possibly become
 863 * ordered by forward tsn skipping their dependencies.
 864 */
 865static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
 866{
 867	struct sk_buff *pos, *tmp;
 868	struct sctp_ulpevent *cevent;
 869	struct sctp_ulpevent *event;
 870	struct sctp_stream *in;
 871	struct sk_buff_head temp;
 872	struct sk_buff_head *lobby = &ulpq->lobby;
 873	__u16 csid, cssn;
 874
 875	in  = &ulpq->asoc->ssnmap->in;
 876
 877	/* We are holding the chunks by stream, by SSN.  */
 878	skb_queue_head_init(&temp);
 879	event = NULL;
 880	sctp_skb_for_each(pos, lobby, tmp) {
 881		cevent = (struct sctp_ulpevent *) pos->cb;
 882		csid = cevent->stream;
 883		cssn = cevent->ssn;
 884
 885		/* Have we gone too far?  */
 886		if (csid > sid)
 887			break;
 888
 889		/* Have we not gone far enough?  */
 890		if (csid < sid)
 891			continue;
 892
 893		/* see if this ssn has been marked by skipping */
 894		if (!SSN_lt(cssn, sctp_ssn_peek(in, csid)))
 895			break;
 896
 897		__skb_unlink(pos, lobby);
 898		if (!event)
 899			/* Create a temporary list to collect chunks on.  */
 900			event = sctp_skb2event(pos);
 901
 902		/* Attach all gathered skbs to the event.  */
 903		__skb_queue_tail(&temp, pos);
 904	}
 905
 906	/* If we didn't reap any data, see if the next expected SSN
 907	 * is next on the queue and if so, use that.
 908	 */
 909	if (event == NULL && pos != (struct sk_buff *)lobby) {
 910		cevent = (struct sctp_ulpevent *) pos->cb;
 911		csid = cevent->stream;
 912		cssn = cevent->ssn;
 913
 914		if (csid == sid && cssn == sctp_ssn_peek(in, csid)) {
 915			sctp_ssn_next(in, csid);
 916			__skb_unlink(pos, lobby);
 917			__skb_queue_tail(&temp, pos);
 918			event = sctp_skb2event(pos);
 919		}
 920	}
 921
 922	/* Send event to the ULP.  'event' is the sctp_ulpevent for
 923	 * very first SKB on the 'temp' list.
 924	 */
 925	if (event) {
 926		/* see if we have more ordered that we can deliver */
 927		sctp_ulpq_retrieve_ordered(ulpq, event);
 928		sctp_ulpq_tail_event(ulpq, event);
 929	}
 930}
 931
  932/* Skip over an SSN. This is used during the processing of a
  933 * Forward TSN chunk to skip over the abandoned ordered data.
 934 */
 935void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
 936{
 937	struct sctp_stream *in;
 938
 939	/* Note: The stream ID must be verified before this routine.  */
 940	in  = &ulpq->asoc->ssnmap->in;
 941
 942	/* Is this an old SSN?  If so ignore. */
 943	if (SSN_lt(ssn, sctp_ssn_peek(in, sid)))
 944		return;
 945
 946	/* Mark that we are no longer expecting this SSN or lower. */
 947	sctp_ssn_skip(in, sid, ssn);
 948
 949	/* Go find any other chunks that were waiting for
 950	 * ordering and deliver them if needed.
 951	 */
 952	sctp_ulpq_reap_ordered(ulpq, sid);
 953}
 954
 955static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
 956		struct sk_buff_head *list, __u16 needed)
 957{
 958	__u16 freed = 0;
 959	__u32 tsn;
 960	struct sk_buff *skb;
 961	struct sctp_ulpevent *event;
 962	struct sctp_tsnmap *tsnmap;
 963
 964	tsnmap = &ulpq->asoc->peer.tsn_map;
 965
 966	while ((skb = __skb_dequeue_tail(list)) != NULL) {
 967		freed += skb_headlen(skb);
 968		event = sctp_skb2event(skb);
 969		tsn = event->tsn;
 970
 971		sctp_ulpevent_free(event);
 972		sctp_tsnmap_renege(tsnmap, tsn);
 973		if (freed >= needed)
 974			return freed;
 975	}
 976
 977	return freed;
 978}
 979
 980/* Renege 'needed' bytes from the ordering queue. */
 981static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
 982{
 983	return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
 984}
 985
 986/* Renege 'needed' bytes from the reassembly queue. */
 987static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
 988{
 989	return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed);
 990}
 991
 992/* Partial deliver the first message as there is pressure on rwnd. */
 993void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
 994				struct sctp_chunk *chunk,
 995				gfp_t gfp)
 996{
 997	struct sctp_ulpevent *event;
 998	struct sctp_association *asoc;
 999	struct sctp_sock *sp;
1000
1001	asoc = ulpq->asoc;
1002	sp = sctp_sk(asoc->base.sk);
1003
1004	/* If the association is already in Partial Delivery mode
 1005	 * we have nothing to do.
1006	 */
1007	if (ulpq->pd_mode)
1008		return;
1009
1010	/* If the user enabled fragment interleave socket option,
1011	 * multiple associations can enter partial delivery.
1012	 * Otherwise, we can only enter partial delivery if the
 1013	 * socket is not in partial delivery mode.
1014	 */
1015	if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) {
1016		/* Is partial delivery possible?  */
1017		event = sctp_ulpq_retrieve_first(ulpq);
1018		/* Send event to the ULP.   */
1019		if (event) {
1020			sctp_ulpq_tail_event(ulpq, event);
1021			sctp_ulpq_set_pd(ulpq);
1022			return;
1023		}
1024	}
1025}
1026
1027/* Renege some packets to make room for an incoming chunk.  */
1028void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
1029		      gfp_t gfp)
1030{
1031	struct sctp_association *asoc;
1032	__u16 needed, freed;
1033
1034	asoc = ulpq->asoc;
1035
1036	if (chunk) {
1037		needed = ntohs(chunk->chunk_hdr->length);
1038		needed -= sizeof(sctp_data_chunk_t);
1039	} else
1040		needed = SCTP_DEFAULT_MAXWINDOW;
1041
1042	freed = 0;
1043
1044	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
1045		freed = sctp_ulpq_renege_order(ulpq, needed);
1046		if (freed < needed) {
1047			freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
1048		}
1049	}
1050	/* If able to free enough room, accept this chunk. */
1051	if (chunk && (freed >= needed)) {
1052		__u32 tsn;
1053		tsn = ntohl(chunk->subh.data_hdr->tsn);
1054		sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn, chunk->transport);
1055		sctp_ulpq_tail_data(ulpq, chunk, gfp);
1056
1057		sctp_ulpq_partial_delivery(ulpq, chunk, gfp);
1058	}
1059
1060	sk_mem_reclaim(asoc->base.sk);
1061}
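/* Worked example of the accounting above: a DATA chunk whose
 * chunk_hdr->length is 1216 bytes carries
 * 1216 - sizeof(sctp_data_chunk_t) = 1216 - 16 = 1200 bytes of user
 * payload, so 'needed' is 1200; only payload bytes count against rwnd.
 */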
1062
1063
1064
1065/* Notify the application if an association is aborted and in
1066 * partial delivery mode.  Send up any pending received messages.
1067 */
1068void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
1069{
1070	struct sctp_ulpevent *ev = NULL;
1071	struct sock *sk;
1072
1073	if (!ulpq->pd_mode)
1074		return;
1075
1076	sk = ulpq->asoc->base.sk;
1077	if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
1078				       &sctp_sk(sk)->subscribe))
1079		ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
1080					      SCTP_PARTIAL_DELIVERY_ABORTED,
1081					      gfp);
1082	if (ev)
1083		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));
1084
1085	/* If there is data waiting, send it up the socket now. */
1086	if (sctp_ulpq_clear_pd(ulpq) || ev)
1087		sk->sk_data_ready(sk, 0);
1088}
v5.4
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/* SCTP kernel implementation
   3 * (C) Copyright IBM Corp. 2001, 2004
   4 * Copyright (c) 1999-2000 Cisco, Inc.
   5 * Copyright (c) 1999-2001 Motorola, Inc.
   6 * Copyright (c) 2001 Intel Corp.
   7 * Copyright (c) 2001 Nokia, Inc.
   8 * Copyright (c) 2001 La Monte H.P. Yarroll
   9 *
  10 * This abstraction carries sctp events to the ULP (sockets).
  11 *
  12 * Please send any bug reports or fixes you make to the
  13 * email address(es):
  14 *    lksctp developers <linux-sctp@vger.kernel.org>
  15 *
  16 * Written or modified by:
  17 *    Jon Grimm             <jgrimm@us.ibm.com>
  18 *    La Monte H.P. Yarroll <piggy@acm.org>
  19 *    Sridhar Samudrala     <sri@us.ibm.com>
  20 */
  21
  22#include <linux/slab.h>
  23#include <linux/types.h>
  24#include <linux/skbuff.h>
  25#include <net/sock.h>
  26#include <net/busy_poll.h>
  27#include <net/sctp/structs.h>
  28#include <net/sctp/sctp.h>
  29#include <net/sctp/sm.h>
  30
  31/* Forward declarations for internal helpers.  */
  32static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
  33					      struct sctp_ulpevent *);
  34static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *,
  35					      struct sctp_ulpevent *);
  36static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);
  37
  38/* 1st Level Abstractions */
  39
  40/* Initialize a ULP queue from a block of memory.  */
  41struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
  42				 struct sctp_association *asoc)
  43{
  44	memset(ulpq, 0, sizeof(struct sctp_ulpq));
  45
  46	ulpq->asoc = asoc;
  47	skb_queue_head_init(&ulpq->reasm);
  48	skb_queue_head_init(&ulpq->reasm_uo);
  49	skb_queue_head_init(&ulpq->lobby);
  50	ulpq->pd_mode  = 0;
  51
  52	return ulpq;
  53}
  54
  55
  56/* Flush the reassembly and ordering queues.  */
  57void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
  58{
  59	struct sk_buff *skb;
  60	struct sctp_ulpevent *event;
  61
  62	while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
  63		event = sctp_skb2event(skb);
  64		sctp_ulpevent_free(event);
  65	}
  66
  67	while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
  68		event = sctp_skb2event(skb);
  69		sctp_ulpevent_free(event);
  70	}
  71
  72	while ((skb = __skb_dequeue(&ulpq->reasm_uo)) != NULL) {
  73		event = sctp_skb2event(skb);
  74		sctp_ulpevent_free(event);
  75	}
  76}
  77
  78/* Dispose of a ulpqueue.  */
  79void sctp_ulpq_free(struct sctp_ulpq *ulpq)
  80{
  81	sctp_ulpq_flush(ulpq);
  82}
  83
  84/* Process an incoming DATA chunk.  */
  85int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
  86			gfp_t gfp)
  87{
  88	struct sk_buff_head temp;
  89	struct sctp_ulpevent *event;
  90	int event_eor = 0;
  91
  92	/* Create an event from the incoming chunk. */
  93	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
  94	if (!event)
  95		return -ENOMEM;
  96
  97	event->ssn = ntohs(chunk->subh.data_hdr->ssn);
  98	event->ppid = chunk->subh.data_hdr->ppid;
  99
 100	/* Do reassembly if needed.  */
 101	event = sctp_ulpq_reasm(ulpq, event);
 102
 103	/* Do ordering if needed.  */
 104	if (event) {
 105		/* Create a temporary list to collect chunks on.  */
 106		skb_queue_head_init(&temp);
 107		__skb_queue_tail(&temp, sctp_event2skb(event));
 108
 109		if (event->msg_flags & MSG_EOR)
 110			event = sctp_ulpq_order(ulpq, event);
 111	}
 112
 113	/* Send event to the ULP.  'event' is the sctp_ulpevent for
  114	 * the very first SKB on the 'temp' list.
 115	 */
 116	if (event) {
 117		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
 118		sctp_ulpq_tail_event(ulpq, &temp);
 119	}
 120
 121	return event_eor;
 122}
 123
 124/* Add a new event for propagation to the ULP.  */
 125/* Clear the partial delivery mode for this socket.   Note: This
 126 * assumes that no association is currently in partial delivery mode.
 127 */
 128int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
 129{
 130	struct sctp_sock *sp = sctp_sk(sk);
 131
 132	if (atomic_dec_and_test(&sp->pd_mode)) {
 133		/* This means there are no other associations in PD, so
 134		 * we can go ahead and clear out the lobby in one shot
 135		 */
 136		if (!skb_queue_empty(&sp->pd_lobby)) {
 137			skb_queue_splice_tail_init(&sp->pd_lobby,
 138						   &sk->sk_receive_queue);
 139			return 1;
 140		}
 141	} else {
 142		/* There are other associations in PD, so we only need to
 143		 * pull stuff out of the lobby that belongs to the
  144	 * association that is exiting PD (all of its notifications
 145		 * are posted here).
 146		 */
 147		if (!skb_queue_empty(&sp->pd_lobby) && asoc) {
 148			struct sk_buff *skb, *tmp;
 149			struct sctp_ulpevent *event;
 150
 151			sctp_skb_for_each(skb, &sp->pd_lobby, tmp) {
 152				event = sctp_skb2event(skb);
 153				if (event->asoc == asoc) {
 154					__skb_unlink(skb, &sp->pd_lobby);
 155					__skb_queue_tail(&sk->sk_receive_queue,
 156							 skb);
 157				}
 158			}
 159		}
 160	}
 161
 162	return 0;
 163}
 164
 165/* Set the pd_mode on the socket and ulpq */
 166static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)
 167{
 168	struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk);
 169
 170	atomic_inc(&sp->pd_mode);
 171	ulpq->pd_mode = 1;
 172}
 173
 174/* Clear the pd_mode and restart any pending messages waiting for delivery. */
 175static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
 176{
 177	ulpq->pd_mode = 0;
 178	sctp_ulpq_reasm_drain(ulpq);
 179	return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
 180}
 181
 182int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sk_buff_head *skb_list)
 183{
 184	struct sock *sk = ulpq->asoc->base.sk;
 185	struct sctp_sock *sp = sctp_sk(sk);
 186	struct sctp_ulpevent *event;
 187	struct sk_buff_head *queue;
 188	struct sk_buff *skb;
 189	int clear_pd = 0;
 190
 191	skb = __skb_peek(skb_list);
 192	event = sctp_skb2event(skb);
 193
 194	/* If the socket is just going to throw this away, do not
 195	 * even try to deliver it.
 196	 */
 197	if (sk->sk_shutdown & RCV_SHUTDOWN &&
 198	    (sk->sk_shutdown & SEND_SHUTDOWN ||
 199	     !sctp_ulpevent_is_notification(event)))
 200		goto out_free;
 201
 202	if (!sctp_ulpevent_is_notification(event)) {
 203		sk_mark_napi_id(sk, skb);
 204		sk_incoming_cpu_update(sk);
 205	}
 206	/* Check if the user wishes to receive this event.  */
 207	if (!sctp_ulpevent_is_enabled(event, ulpq->asoc->subscribe))
 208		goto out_free;
 209
 210	/* If we are in partial delivery mode, post to the lobby until
  211	 * partial delivery is cleared, unless, of course, _this_
  212	 * association is the cause of the partial delivery.
 213	 */
 214
 215	if (atomic_read(&sp->pd_mode) == 0) {
 216		queue = &sk->sk_receive_queue;
 217	} else {
 218		if (ulpq->pd_mode) {
 219			/* If the association is in partial delivery, we
 220			 * need to finish delivering the partially processed
 221			 * packet before passing any other data.  This is
 222			 * because we don't truly support stream interleaving.
 223			 */
 224			if ((event->msg_flags & MSG_NOTIFICATION) ||
 225			    (SCTP_DATA_NOT_FRAG ==
 226				    (event->msg_flags & SCTP_DATA_FRAG_MASK)))
 227				queue = &sp->pd_lobby;
 228			else {
 229				clear_pd = event->msg_flags & MSG_EOR;
 230				queue = &sk->sk_receive_queue;
 231			}
 232		} else {
 233			/*
 234			 * If fragment interleave is enabled, we
 235			 * can queue this to the receive queue instead
 236			 * of the lobby.
 237			 */
 238			if (sp->frag_interleave)
 239				queue = &sk->sk_receive_queue;
 240			else
 241				queue = &sp->pd_lobby;
 242		}
 243	}
 244
 245	skb_queue_splice_tail_init(skb_list, queue);
 246
 247	/* Did we just complete partial delivery and need to get
 248	 * rolling again?  Move pending data to the receive
 249	 * queue.
 250	 */
 251	if (clear_pd)
 252		sctp_ulpq_clear_pd(ulpq);
 253
 254	if (queue == &sk->sk_receive_queue && !sp->data_ready_signalled) {
 255		if (!sock_owned_by_user(sk))
 256			sp->data_ready_signalled = 1;
 257		sk->sk_data_ready(sk);
 258	}
 259	return 1;
 260
 261out_free:
 262	if (skb_list)
 263		sctp_queue_purge_ulpevents(skb_list);
 264	else
 265		sctp_ulpevent_free(event);
 266
 267	return 0;
 268}
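/* Hedged caller sketch (hypothetical helper) for this newer API:
 * events are handed over on a caller-built sk_buff_head instead of
 * through the old skb->prev back-pointer:
 */
static void example_tail_one_event(struct sctp_ulpq *ulpq,
				   struct sctp_ulpevent *event)
{
	struct sk_buff_head temp;

	skb_queue_head_init(&temp);
	__skb_queue_tail(&temp, sctp_event2skb(event));
	sctp_ulpq_tail_event(ulpq, &temp);
}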
 269
 270/* 2nd Level Abstractions */
 271
 272/* Helper function to store chunks that need to be reassembled.  */
 273static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
 274					 struct sctp_ulpevent *event)
 275{
 276	struct sk_buff *pos;
 277	struct sctp_ulpevent *cevent;
 278	__u32 tsn, ctsn;
 279
 280	tsn = event->tsn;
 281
 282	/* See if it belongs at the end. */
 283	pos = skb_peek_tail(&ulpq->reasm);
 284	if (!pos) {
 285		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
 286		return;
 287	}
 288
 289	/* Short circuit just dropping it at the end. */
 290	cevent = sctp_skb2event(pos);
 291	ctsn = cevent->tsn;
 292	if (TSN_lt(ctsn, tsn)) {
 293		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
 294		return;
 295	}
 296
 297	/* Find the right place in this list. We store them by TSN.  */
 298	skb_queue_walk(&ulpq->reasm, pos) {
 299		cevent = sctp_skb2event(pos);
 300		ctsn = cevent->tsn;
 301
 302		if (TSN_lt(tsn, ctsn))
 303			break;
 304	}
 305
 306	/* Insert before pos. */
 307	__skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
 308
 309}
 310
 311/* Helper function to return an event corresponding to the reassembled
 312 * datagram.
 313 * This routine creates a re-assembled skb given the first and last skb's
 314 * as stored in the reassembly queue. The skb's may be non-linear if the sctp
 315 * payload was fragmented on the way and ip had to reassemble them.
 316 * We add the rest of skb's to the first skb's fraglist.
 317 */
 318struct sctp_ulpevent *sctp_make_reassembled_event(struct net *net,
 319						  struct sk_buff_head *queue,
 320						  struct sk_buff *f_frag,
 321						  struct sk_buff *l_frag)
 322{
 323	struct sk_buff *pos;
 324	struct sk_buff *new = NULL;
 325	struct sctp_ulpevent *event;
 326	struct sk_buff *pnext, *last;
 327	struct sk_buff *list = skb_shinfo(f_frag)->frag_list;
 328
 329	/* Store the pointer to the 2nd skb */
 330	if (f_frag == l_frag)
 331		pos = NULL;
 332	else
 333		pos = f_frag->next;
 334
 335	/* Get the last skb in the f_frag's frag_list if present. */
 336	for (last = list; list; last = list, list = list->next)
 337		;
 338
  339	/* Add the list of remaining fragments to the first fragment's
 340	 * frag_list.
 341	 */
 342	if (last)
 343		last->next = pos;
 344	else {
 345		if (skb_cloned(f_frag)) {
 346			/* This is a cloned skb, we can't just modify
 347			 * the frag_list.  We need a new skb to do that.
 348			 * Instead of calling skb_unshare(), we'll do it
 349			 * ourselves since we need to delay the free.
 350			 */
 351			new = skb_copy(f_frag, GFP_ATOMIC);
 352			if (!new)
 353				return NULL;	/* try again later */
 354
 355			sctp_skb_set_owner_r(new, f_frag->sk);
 356
 357			skb_shinfo(new)->frag_list = pos;
 358		} else
 359			skb_shinfo(f_frag)->frag_list = pos;
 360	}
 361
 362	/* Remove the first fragment from the reassembly queue.  */
 363	__skb_unlink(f_frag, queue);
 364
 365	/* if we did unshare, then free the old skb and re-assign */
 366	if (new) {
 367		kfree_skb(f_frag);
 368		f_frag = new;
 369	}
 370
 371	while (pos) {
 372
 373		pnext = pos->next;
 374
 375		/* Update the len and data_len fields of the first fragment. */
 376		f_frag->len += pos->len;
 377		f_frag->data_len += pos->len;
 378
 379		/* Remove the fragment from the reassembly queue.  */
 380		__skb_unlink(pos, queue);
 381
 382		/* Break if we have reached the last fragment.  */
 383		if (pos == l_frag)
 384			break;
 385		pos->next = pnext;
 386		pos = pnext;
 387	}
 388
 389	event = sctp_skb2event(f_frag);
 390	SCTP_INC_STATS(net, SCTP_MIB_REASMUSRMSGS);
 391
 392	return event;
 393}
 394
 395
 396/* Helper function to check if an incoming chunk has filled up the last
 397 * missing fragment in a SCTP datagram and return the corresponding event.
 398 */
 399static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
 400{
 401	struct sk_buff *pos;
 402	struct sctp_ulpevent *cevent;
 403	struct sk_buff *first_frag = NULL;
 404	__u32 ctsn, next_tsn;
 405	struct sctp_ulpevent *retval = NULL;
 406	struct sk_buff *pd_first = NULL;
 407	struct sk_buff *pd_last = NULL;
 408	size_t pd_len = 0;
 409	struct sctp_association *asoc;
 410	u32 pd_point;
 411
 412	/* Initialized to 0 just to avoid compiler warning message.  Will
 413	 * never be used with this value. It is referenced only after it
 414	 * is set when we find the first fragment of a message.
 415	 */
 416	next_tsn = 0;
 417
 418	/* The chunks are held in the reasm queue sorted by TSN.
 419	 * Walk through the queue sequentially and look for a sequence of
 420	 * fragmented chunks that complete a datagram.
 421	 * 'first_frag' and next_tsn are reset when we find a chunk which
 422	 * is the first fragment of a datagram. Once these 2 fields are set
 423	 * we expect to find the remaining middle fragments and the last
 424	 * fragment in order. If not, first_frag is reset to NULL and we
 425	 * start the next pass when we find another first fragment.
 426	 *
 427	 * There is a potential to do partial delivery if user sets
  428	 * SCTP_PARTIAL_DELIVERY_POINT option. Let's count some things here
  429	 * to see if we can do PD.
 430	 */
 431	skb_queue_walk(&ulpq->reasm, pos) {
 432		cevent = sctp_skb2event(pos);
 433		ctsn = cevent->tsn;
 434
 435		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
 436		case SCTP_DATA_FIRST_FRAG:
 437			/* If this "FIRST_FRAG" is the first
 438			 * element in the queue, then count it towards
 439			 * possible PD.
 440			 */
 441			if (skb_queue_is_first(&ulpq->reasm, pos)) {
 442			    pd_first = pos;
 443			    pd_last = pos;
 444			    pd_len = pos->len;
 445			} else {
 446			    pd_first = NULL;
 447			    pd_last = NULL;
 448			    pd_len = 0;
 449			}
 450
 451			first_frag = pos;
 452			next_tsn = ctsn + 1;
 453			break;
 454
 455		case SCTP_DATA_MIDDLE_FRAG:
 456			if ((first_frag) && (ctsn == next_tsn)) {
 457				next_tsn++;
 458				if (pd_first) {
 459				    pd_last = pos;
 460				    pd_len += pos->len;
 461				}
 462			} else
 463				first_frag = NULL;
 464			break;
 465
 466		case SCTP_DATA_LAST_FRAG:
 467			if (first_frag && (ctsn == next_tsn))
 468				goto found;
 469			else
 470				first_frag = NULL;
 471			break;
 472		}
 473	}
 474
 475	asoc = ulpq->asoc;
 476	if (pd_first) {
  477		/* Make sure we can enter partial delivery.
  478		 * We can trigger partial delivery only if fragment
  479		 * interleave is set, or the socket is not already
  480		 * in partial delivery.
 481		 */
 482		if (!sctp_sk(asoc->base.sk)->frag_interleave &&
 483		    atomic_read(&sctp_sk(asoc->base.sk)->pd_mode))
 484			goto done;
 485
 486		cevent = sctp_skb2event(pd_first);
 487		pd_point = sctp_sk(asoc->base.sk)->pd_point;
 488		if (pd_point && pd_point <= pd_len) {
 489			retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
 490							     &ulpq->reasm,
 491							     pd_first,
 492							     pd_last);
 493			if (retval)
 494				sctp_ulpq_set_pd(ulpq);
 495		}
 496	}
 497done:
 498	return retval;
 499found:
 500	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
 501					     &ulpq->reasm, first_frag, pos);
 502	if (retval)
 503		retval->msg_flags |= MSG_EOR;
 504	goto done;
 505}
 506
 507/* Retrieve the next set of fragments of a partial message. */
 508static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
 509{
 510	struct sk_buff *pos, *last_frag, *first_frag;
 511	struct sctp_ulpevent *cevent;
 512	__u32 ctsn, next_tsn;
 513	int is_last;
 514	struct sctp_ulpevent *retval;
 515
 516	/* The chunks are held in the reasm queue sorted by TSN.
 517	 * Walk through the queue sequentially and look for the first
 518	 * sequence of fragmented chunks.
 519	 */
 520
 521	if (skb_queue_empty(&ulpq->reasm))
 522		return NULL;
 523
 524	last_frag = first_frag = NULL;
 525	retval = NULL;
 526	next_tsn = 0;
 527	is_last = 0;
 528
 529	skb_queue_walk(&ulpq->reasm, pos) {
 530		cevent = sctp_skb2event(pos);
 531		ctsn = cevent->tsn;
 532
 533		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
 534		case SCTP_DATA_FIRST_FRAG:
 535			if (!first_frag)
 536				return NULL;
 537			goto done;
 538		case SCTP_DATA_MIDDLE_FRAG:
 539			if (!first_frag) {
 540				first_frag = pos;
 541				next_tsn = ctsn + 1;
 542				last_frag = pos;
 543			} else if (next_tsn == ctsn) {
 544				next_tsn++;
 545				last_frag = pos;
 546			} else
 547				goto done;
 548			break;
 549		case SCTP_DATA_LAST_FRAG:
 550			if (!first_frag)
 551				first_frag = pos;
 552			else if (ctsn != next_tsn)
 553				goto done;
 554			last_frag = pos;
 555			is_last = 1;
 556			goto done;
 557		default:
 558			return NULL;
 559		}
 560	}
 561
 562	/* We have the reassembled event. There is no need to look
 563	 * further.
 564	 */
 565done:
 566	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
 567					&ulpq->reasm, first_frag, last_frag);
 568	if (retval && is_last)
 569		retval->msg_flags |= MSG_EOR;
 570
 571	return retval;
 572}
 573
 574
 575/* Helper function to reassemble chunks.  Hold chunks on the reasm queue that
 576 * need reassembling.
 577 */
 578static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
 579						struct sctp_ulpevent *event)
 580{
 581	struct sctp_ulpevent *retval = NULL;
 582
 583	/* Check if this is part of a fragmented message.  */
 584	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
 585		event->msg_flags |= MSG_EOR;
 586		return event;
 587	}
 588
 589	sctp_ulpq_store_reasm(ulpq, event);
 590	if (!ulpq->pd_mode)
 591		retval = sctp_ulpq_retrieve_reassembled(ulpq);
 592	else {
 593		__u32 ctsn, ctsnap;
 594
 595		/* Do not even bother unless this is the next tsn to
 596		 * be delivered.
 597		 */
 598		ctsn = event->tsn;
 599		ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
 600		if (TSN_lte(ctsn, ctsnap))
 601			retval = sctp_ulpq_retrieve_partial(ulpq);
 602	}
 603
 604	return retval;
 605}
 606
 607/* Retrieve the first part (sequential fragments) for partial delivery.  */
 608static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
 609{
 610	struct sk_buff *pos, *last_frag, *first_frag;
 611	struct sctp_ulpevent *cevent;
 612	__u32 ctsn, next_tsn;
 613	struct sctp_ulpevent *retval;
 614
 615	/* The chunks are held in the reasm queue sorted by TSN.
 616	 * Walk through the queue sequentially and look for a sequence of
 617	 * fragmented chunks that start a datagram.
 618	 */
 619
 620	if (skb_queue_empty(&ulpq->reasm))
 621		return NULL;
 622
 623	last_frag = first_frag = NULL;
 624	retval = NULL;
 625	next_tsn = 0;
 626
 627	skb_queue_walk(&ulpq->reasm, pos) {
 628		cevent = sctp_skb2event(pos);
 629		ctsn = cevent->tsn;
 630
 631		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
 632		case SCTP_DATA_FIRST_FRAG:
 633			if (!first_frag) {
 634				first_frag = pos;
 635				next_tsn = ctsn + 1;
 636				last_frag = pos;
 637			} else
 638				goto done;
 639			break;
 640
 641		case SCTP_DATA_MIDDLE_FRAG:
 642			if (!first_frag)
 643				return NULL;
 644			if (ctsn == next_tsn) {
 645				next_tsn++;
 646				last_frag = pos;
 647			} else
 648				goto done;
 649			break;
 650
 651		case SCTP_DATA_LAST_FRAG:
 652			if (!first_frag)
 653				return NULL;
 654			else
 655				goto done;
 656			break;
 657
 658		default:
 659			return NULL;
 660		}
 661	}
 662
 663	/* We have the reassembled event. There is no need to look
 664	 * further.
 665	 */
 666done:
 667	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
 668					&ulpq->reasm, first_frag, last_frag);
 669	return retval;
 670}
 671
 672/*
 673 * Flush out stale fragments from the reassembly queue when processing
 674 * a Forward TSN.
 675 *
 676 * RFC 3758, Section 3.6
 677 *
 678 * After receiving and processing a FORWARD TSN, the data receiver MUST
 679 * take cautions in updating its re-assembly queue.  The receiver MUST
 680 * remove any partially reassembled message, which is still missing one
 681 * or more TSNs earlier than or equal to the new cumulative TSN point.
 682 * In the event that the receiver has invoked the partial delivery API,
 683 * a notification SHOULD also be generated to inform the upper layer API
 684 * that the message being partially delivered will NOT be completed.
 685 */
 686void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
 687{
 688	struct sk_buff *pos, *tmp;
 689	struct sctp_ulpevent *event;
 690	__u32 tsn;
 691
 692	if (skb_queue_empty(&ulpq->reasm))
 693		return;
 694
 695	skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
 696		event = sctp_skb2event(pos);
 697		tsn = event->tsn;
 698
 699		/* Since the entire message must be abandoned by the
 700		 * sender (item A3 in Section 3.5, RFC 3758), we can
  701		 * free all fragments on the list that are less than
 702		 * or equal to ctsn_point
 703		 */
 704		if (TSN_lte(tsn, fwd_tsn)) {
 705			__skb_unlink(pos, &ulpq->reasm);
 706			sctp_ulpevent_free(event);
 707		} else
 708			break;
 709	}
 710}
 711
 712/*
  713 * Drain the reassembly queue.  If we just cleared partial delivery, it
 714 * is possible that the reassembly queue will contain already reassembled
 715 * messages.  Retrieve any such messages and give them to the user.
 716 */
 717static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
 718{
 719	struct sctp_ulpevent *event = NULL;
 720
 721	if (skb_queue_empty(&ulpq->reasm))
 722		return;
 723
 724	while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
 725		struct sk_buff_head temp;
 726
 727		skb_queue_head_init(&temp);
 728		__skb_queue_tail(&temp, sctp_event2skb(event));
 729
 730		/* Do ordering if needed.  */
 731		if (event->msg_flags & MSG_EOR)
 732			event = sctp_ulpq_order(ulpq, event);
 733
 734		/* Send event to the ULP.  'event' is the
  735		 * sctp_ulpevent for the very first SKB on the 'temp' list.
 736		 */
 737		if (event)
 738			sctp_ulpq_tail_event(ulpq, &temp);
 739	}
 740}
 741
 742
 743/* Helper function to gather skbs that have possibly become
  744 * ordered by an incoming chunk.
 745 */
 746static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
 747					      struct sctp_ulpevent *event)
 748{
 749	struct sk_buff_head *event_list;
 750	struct sk_buff *pos, *tmp;
 751	struct sctp_ulpevent *cevent;
 752	struct sctp_stream *stream;
 753	__u16 sid, csid, cssn;
 754
 755	sid = event->stream;
 756	stream  = &ulpq->asoc->stream;
 757
 758	event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;
 759
 760	/* We are holding the chunks by stream, by SSN.  */
 761	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
 762		cevent = (struct sctp_ulpevent *) pos->cb;
 763		csid = cevent->stream;
 764		cssn = cevent->ssn;
 765
 766		/* Have we gone too far?  */
 767		if (csid > sid)
 768			break;
 769
 770		/* Have we not gone far enough?  */
 771		if (csid < sid)
 772			continue;
 773
 774		if (cssn != sctp_ssn_peek(stream, in, sid))
 775			break;
 776
 777		/* Found it, so mark in the stream. */
 778		sctp_ssn_next(stream, in, sid);
 779
 780		__skb_unlink(pos, &ulpq->lobby);
 781
 782		/* Attach all gathered skbs to the event.  */
 783		__skb_queue_tail(event_list, pos);
 784	}
 785}
 786
 787/* Helper function to store chunks needing ordering.  */
 788static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
 789					   struct sctp_ulpevent *event)
 790{
 791	struct sk_buff *pos;
 792	struct sctp_ulpevent *cevent;
 793	__u16 sid, csid;
 794	__u16 ssn, cssn;
 795
 796	pos = skb_peek_tail(&ulpq->lobby);
 797	if (!pos) {
 798		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
 799		return;
 800	}
 801
 802	sid = event->stream;
 803	ssn = event->ssn;
 804
 805	cevent = (struct sctp_ulpevent *) pos->cb;
 806	csid = cevent->stream;
 807	cssn = cevent->ssn;
 808	if (sid > csid) {
 809		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
 810		return;
 811	}
 812
 813	if ((sid == csid) && SSN_lt(cssn, ssn)) {
 814		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
 815		return;
 816	}
 817
 818	/* Find the right place in this list.  We store them by
 819	 * stream ID and then by SSN.
 820	 */
 821	skb_queue_walk(&ulpq->lobby, pos) {
 822		cevent = (struct sctp_ulpevent *) pos->cb;
 823		csid = cevent->stream;
 824		cssn = cevent->ssn;
 825
 826		if (csid > sid)
 827			break;
 828		if (csid == sid && SSN_lt(ssn, cssn))
 829			break;
 830	}
 831
 832
 833	/* Insert before pos. */
 834	__skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
 835}
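/* For reference: SSN_lt() above is the 16-bit analogue of TSN_lt(),
 * again serial-number arithmetic (RFC 1982), so stream sequence
 * numbers sort correctly across a wrap.  A sketch of the idea (the
 * real macro lives in <net/sctp/sm.h>):
 */
#if 0	/* example, not compiled */
static inline int example_ssn_lt(__u16 a, __u16 b)
{
	return (__s16)(a - b) < 0;
}
/* e.g. example_ssn_lt(65534, 1) is true, so an SSN that just wrapped
 * still sorts after the pre-wrap SSNs in the lobby.
 */
#endif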
 836
 837static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
 838					     struct sctp_ulpevent *event)
 839{
 840	__u16 sid, ssn;
 841	struct sctp_stream *stream;
 842
 843	/* Check if this message needs ordering.  */
 844	if (event->msg_flags & SCTP_DATA_UNORDERED)
 845		return event;
 846
 847	/* Note: The stream ID must be verified before this routine.  */
 848	sid = event->stream;
 849	ssn = event->ssn;
 850	stream  = &ulpq->asoc->stream;
 851
 852	/* Is this the expected SSN for this stream ID?  */
 853	if (ssn != sctp_ssn_peek(stream, in, sid)) {
 854		/* We've received something out of order, so find where it
 855		 * needs to be placed.  We order by stream and then by SSN.
 856		 */
 857		sctp_ulpq_store_ordered(ulpq, event);
 858		return NULL;
 859	}
 860
 861	/* Mark that the next chunk has been found.  */
 862	sctp_ssn_next(stream, in, sid);
 863
 864	/* Go find any other chunks that were waiting for
 865	 * ordering.
 866	 */
 867	sctp_ulpq_retrieve_ordered(ulpq, event);
 868
 869	return event;
 870}
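/* Worked example of the ordering logic above (hypothetical stream 1,
 * currently expecting SSN 5):
 *
 *   - SSN 6 arrives: 6 != peek(5), so it is stored in the lobby.
 *   - SSN 7 arrives: likewise stored; the lobby holds (1,6), (1,7).
 *   - SSN 5 arrives: it matches peek, sctp_ssn_next() advances the
 *     expectation to 6, and sctp_ulpq_retrieve_ordered() then pulls
 *     (1,6) and (1,7) off the lobby in one pass.  All three messages
 *     reach the ULP in SSN order.
 */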
 871
 872/* Helper function to gather skbs that have possibly become
 873 * ordered by a FORWARD TSN chunk skipping their dependencies.
 874 */
 875static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
 876{
 877	struct sk_buff *pos, *tmp;
 878	struct sctp_ulpevent *cevent;
 879	struct sctp_ulpevent *event;
 880	struct sctp_stream *stream;
 881	struct sk_buff_head temp;
 882	struct sk_buff_head *lobby = &ulpq->lobby;
 883	__u16 csid, cssn;
 884
 885	stream = &ulpq->asoc->stream;
 886
 887	/* We are holding the chunks by stream, by SSN.  */
 888	skb_queue_head_init(&temp);
 889	event = NULL;
 890	sctp_skb_for_each(pos, lobby, tmp) {
 891		cevent = (struct sctp_ulpevent *) pos->cb;
 892		csid = cevent->stream;
 893		cssn = cevent->ssn;
 894
 895		/* Have we gone too far?  */
 896		if (csid > sid)
 897			break;
 898
 899		/* Have we not gone far enough?  */
 900		if (csid < sid)
 901			continue;
 902
 903		/* See if this SSN has been marked by skipping. */
 904		if (!SSN_lt(cssn, sctp_ssn_peek(stream, in, csid)))
 905			break;
 906
 907		__skb_unlink(pos, lobby);
 908		if (!event)
 909			/* Create a temporary list to collect chunks on.  */
 910			event = sctp_skb2event(pos);
 911
 912		/* Attach all gathered skbs to the event.  */
 913		__skb_queue_tail(&temp, pos);
 914	}
 915
 916	/* If we didn't reap any data, see if the next expected SSN
 917	 * is next on the queue and if so, use that.
 918	 */
 919	if (event == NULL && pos != (struct sk_buff *)lobby) {
 920		cevent = (struct sctp_ulpevent *) pos->cb;
 921		csid = cevent->stream;
 922		cssn = cevent->ssn;
 923
 924		if (csid == sid && cssn == sctp_ssn_peek(stream, in, csid)) {
 925			sctp_ssn_next(stream, in, csid);
 926			__skb_unlink(pos, lobby);
 927			__skb_queue_tail(&temp, pos);
 928			event = sctp_skb2event(pos);
 929		}
 930	}
 931
 932	/* Send event to the ULP.  'event' is the sctp_ulpevent for
 933	 * the very first SKB on the 'temp' list.
 934	 */
 935	if (event) {
 936		/* See if we have more ordered data that we can deliver. */
 937		sctp_ulpq_retrieve_ordered(ulpq, event);
 938		sctp_ulpq_tail_event(ulpq, &temp);
 939	}
 940}
 941
 942/* Skip over an SSN. This is used during the processing of
 943 * a FORWARD TSN chunk to skip over the abandoned ordered data.
 944 */
 945void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
 946{
 947	struct sctp_stream *stream;
 948
 949	/* Note: The stream ID must be verified before this routine.  */
 950	stream  = &ulpq->asoc->stream;
 951
 952	/* Is this an old SSN?  If so ignore. */
 953	if (SSN_lt(ssn, sctp_ssn_peek(stream, in, sid)))
 954		return;
 955
 956	/* Mark that we are no longer expecting this SSN or lower. */
 957	sctp_ssn_skip(stream, in, sid, ssn);
 958
 959	/* Go find any other chunks that were waiting for
 960	 * ordering and deliver them if needed.
 961	 */
 962	sctp_ulpq_reap_ordered(ulpq, sid);
 963}
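/* For reference: sctp_ssn_skip() sets the next expected SSN for 'sid'
 * to ssn + 1 (see the accessor macros in <net/sctp/structs.h>).  So
 * if stream 3 was expecting SSN 2 and a FORWARD TSN abandons
 * everything up to SSN 7, the stream is left expecting SSN 8, and
 * sctp_ulpq_reap_ordered() can then release any lobby entries whose
 * SSN falls below that mark.
 */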
 964
 965__u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq, struct sk_buff_head *list,
 966			    __u16 needed)
 967{
 968	__u16 freed = 0;
 969	__u32 tsn, last_tsn;
 970	struct sk_buff *skb, *flist, *last;
 971	struct sctp_ulpevent *event;
 972	struct sctp_tsnmap *tsnmap;
 973
 974	tsnmap = &ulpq->asoc->peer.tsn_map;
 975
 976	while ((skb = skb_peek_tail(list)) != NULL) {
 977		event = sctp_skb2event(skb);
 978		tsn = event->tsn;
 979
 980		/* Don't renege below the Cumulative TSN ACK Point. */
 981		if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap)))
 982			break;
 983
 984		/* Events in ordering queue may have multiple fragments
 985		 * corresponding to additional TSNs.  Sum the total
 986		 * freed space; find the last TSN.
 987		 */
 988		freed += skb_headlen(skb);
 989		flist = skb_shinfo(skb)->frag_list;
 990		for (last = flist; flist; flist = flist->next) {
 991			last = flist;
 992			freed += skb_headlen(last);
 993		}
 994		if (last)
 995			last_tsn = sctp_skb2event(last)->tsn;
 996		else
 997			last_tsn = tsn;
 998
 999		/* Unlink the event, then renege all applicable TSNs. */
1000		__skb_unlink(skb, list);
1001		sctp_ulpevent_free(event);
1002		while (TSN_lte(tsn, last_tsn)) {
1003			sctp_tsnmap_renege(tsnmap, tsn);
1004			tsn++;
1005		}
1006		if (freed >= needed)
1007			return freed;
1008	}
1009
1010	return freed;
1011}
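/* Worked example of the accounting above: suppose the tail event in
 * the ordering queue is a message reassembled from TSNs 100..102,
 * held as a head skb (TSN 100, 500 bytes of linear data) with a
 * frag_list of two more skbs (TSN 101, 500 bytes; TSN 102, 200
 * bytes).  Then 'freed' grows by 500 + 500 + 200 = 1200, last_tsn is
 * 102, and TSNs 100, 101 and 102 are all reneged from the tsnmap
 * before the loop considers the next event.
 */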
1012
1013/* Renege 'needed' bytes from the ordering queue. */
1014static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
1015{
1016	return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
1017}
1018
1019/* Renege 'needed' bytes from the reassembly queue. */
1020static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
1021{
1022	return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed);
1023}
1024
1025/* Partially deliver the first message as there is pressure on rwnd. */
1026void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
1027				gfp_t gfp)
1028{
1029	struct sctp_ulpevent *event;
1030	struct sctp_association *asoc;
1031	struct sctp_sock *sp;
1032	__u32 ctsn;
1033	struct sk_buff *skb;
1034
1035	asoc = ulpq->asoc;
1036	sp = sctp_sk(asoc->base.sk);
1037
1038	/* If the association is already in Partial Delivery mode
1039	 * we have nothing to do.
1040	 */
1041	if (ulpq->pd_mode)
1042		return;
1043
1044	/* Data must be at or below the Cumulative TSN ACK Point to
1045	 * start partial delivery.
1046	 */
1047	skb = skb_peek(&asoc->ulpq.reasm);
1048	if (skb != NULL) {
1049		ctsn = sctp_skb2event(skb)->tsn;
1050		if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
1051			return;
1052	}
1053
1054	/* If the user enabled the fragment interleave socket option,
1055	 * multiple associations can enter partial delivery at once.
1056	 * Otherwise, we can only enter partial delivery if the
1057	 * socket is not already in partial delivery mode.
1058	 */
1059	if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) {
1060		/* Is partial delivery possible?  */
1061		event = sctp_ulpq_retrieve_first(ulpq);
1062		/* Send event to the ULP.   */
1063		if (event) {
1064			struct sk_buff_head temp;
1065
1066			skb_queue_head_init(&temp);
1067			__skb_queue_tail(&temp, sctp_event2skb(event));
1068			sctp_ulpq_tail_event(ulpq, &temp);
1069			sctp_ulpq_set_pd(ulpq);
1070			return;
1071		}
1072	}
1073}
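/* The frag_interleave flag checked above corresponds to the
 * SCTP_FRAGMENT_INTERLEAVE socket option (RFC 6458).  A hypothetical
 * userspace sketch of enabling it, so that several associations on
 * one socket may be in partial delivery at once:
 */
#if 0	/* userspace example, not compiled here */
	int level = 2;	/* 0, 1 or 2; 2 = full interleaving */

	if (setsockopt(fd, IPPROTO_SCTP, SCTP_FRAGMENT_INTERLEAVE,
		       &level, sizeof(level)) < 0)
		perror("setsockopt");
#endif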
1074
1075/* Renege some packets to make room for an incoming chunk.  */
1076void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
1077		      gfp_t gfp)
1078{
1079	struct sctp_association *asoc = ulpq->asoc;
1080	__u32 freed = 0;
1081	__u16 needed;
1082
1083	needed = ntohs(chunk->chunk_hdr->length) -
1084		 sizeof(struct sctp_data_chunk);
1085
1086	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
1087		freed = sctp_ulpq_renege_order(ulpq, needed);
1088		if (freed < needed)
1089			freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
1090	}
1091	/* If able to free enough room, accept this chunk. */
1092	if (sk_rmem_schedule(asoc->base.sk, chunk->skb, needed) &&
1093	    freed >= needed) {
1094		int retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
1095		/*
1096		 * Enter partial delivery if the chunk has not been
1097		 * delivered; otherwise, drain the reassembly queue.
1098		 */
1099		if (retval <= 0)
1100			sctp_ulpq_partial_delivery(ulpq, gfp);
1101		else if (retval == 1)
1102			sctp_ulpq_reasm_drain(ulpq);
1103	}
1104
1105	sk_mem_reclaim(asoc->base.sk);
1106}
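/* Worked example of the sizing above: 'needed' is the DATA chunk
 * payload, i.e. the wire length minus sizeof(struct sctp_data_chunk)
 * (the chunk header plus the DATA header, 16 bytes).  A chunk whose
 * header reports a length of 1016 therefore needs 1016 - 16 = 1000
 * bytes of receive buffer, and reneging stops as soon as at least
 * that much has been freed from the lobby and reassembly queues.
 */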
1107
1108
1109
1110/* Notify the application if an association is aborted and in
1111 * partial delivery mode.  Send up any pending received messages.
1112 */
1113void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
1114{
1115	struct sctp_ulpevent *ev = NULL;
1116	struct sctp_sock *sp;
1117	struct sock *sk;
1118
1119	if (!ulpq->pd_mode)
1120		return;
1121
1122	sk = ulpq->asoc->base.sk;
1123	sp = sctp_sk(sk);
1124	if (sctp_ulpevent_type_enabled(ulpq->asoc->subscribe,
1125				       SCTP_PARTIAL_DELIVERY_EVENT))
1126		ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
1127					      SCTP_PARTIAL_DELIVERY_ABORTED,
1128					      0, 0, 0, gfp);
1129	if (ev)
1130		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));
1131
1132	/* If there is data waiting, send it up the socket now. */
1133	if ((sctp_ulpq_clear_pd(ulpq) || ev) && !sp->data_ready_signalled) {
1134		sp->data_ready_signalled = 1;
1135		sk->sk_data_ready(sk);
1136	}
1137}
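/* The PDAPI notification queued above is only generated if the user
 * subscribed to it.  A hypothetical userspace sketch of subscribing
 * via struct sctp_event_subscribe (RFC 6458):
 */
#if 0	/* userspace example, not compiled here */
	struct sctp_event_subscribe events;

	memset(&events, 0, sizeof(events));
	events.sctp_data_io_event = 1;
	events.sctp_partial_delivery_event = 1;
	if (setsockopt(fd, IPPROTO_SCTP, SCTP_EVENTS,
		       &events, sizeof(events)) < 0)
		perror("setsockopt");
	/* recvmsg() may then return an SCTP_PARTIAL_DELIVERY_EVENT
	 * notification, e.g. with pdapi_indication set to
	 * SCTP_PARTIAL_DELIVERY_ABORTED.
	 */
#endif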