/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This abstraction carries sctp events to the ULP (sockets).
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, see
 * <http://www.gnu.org/licenses/>.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <linux-sctp@vger.kernel.org>
 *
 * Written or modified by:
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 */

#include <linux/slab.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/busy_poll.h>
#include <net/sctp/structs.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Forward declarations for internal helpers. */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *);
static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *,
					     struct sctp_ulpevent *);
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);

/* 1st Level Abstractions */

/* Initialize a ULP queue from a block of memory. */
struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
				 struct sctp_association *asoc)
{
	memset(ulpq, 0, sizeof(struct sctp_ulpq));

	ulpq->asoc = asoc;
	skb_queue_head_init(&ulpq->reasm);
	skb_queue_head_init(&ulpq->reasm_uo);
	skb_queue_head_init(&ulpq->lobby);
	ulpq->pd_mode = 0;

	return ulpq;
}


/* Flush the reassembly and ordering queues. */
void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
{
	struct sk_buff *skb;
	struct sctp_ulpevent *event;

	while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}

	while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}

	while ((skb = __skb_dequeue(&ulpq->reasm_uo)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}
}

/* Dispose of a ulpqueue. */
void sctp_ulpq_free(struct sctp_ulpq *ulpq)
{
	sctp_ulpq_flush(ulpq);
}

/* Process an incoming DATA chunk.  Returns 1 if a complete message
 * carrying MSG_EOR was delivered to the ULP, 0 if the data was queued
 * for reassembly or ordering, or -ENOMEM if no event could be allocated.
 */
int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
			gfp_t gfp)
{
	struct sk_buff_head temp;
	struct sctp_ulpevent *event;
	int event_eor = 0;

	/* Create an event from the incoming chunk. */
	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
	if (!event)
		return -ENOMEM;

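	/* The SSN is converted to host byte order for local comparisons;
	 * the PPID is opaque to the stack and is kept in the byte order
	 * the peer sent it in.
	 */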
	event->ssn = ntohs(chunk->subh.data_hdr->ssn);
	event->ppid = chunk->subh.data_hdr->ppid;

	/* Do reassembly if needed. */
	event = sctp_ulpq_reasm(ulpq, event);

	/* Do ordering if needed. */
	if (event && (event->msg_flags & MSG_EOR)) {
		/* Create a temporary list to collect chunks on. */
		skb_queue_head_init(&temp);
		__skb_queue_tail(&temp, sctp_event2skb(event));

		event = sctp_ulpq_order(ulpq, event);
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for the
	 * very first SKB on the 'temp' list.
	 */
	if (event) {
		event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
		sctp_ulpq_tail_event(ulpq, event);
	}

	return event_eor;
}

/* Clear the partial delivery mode for this socket.  The socket keeps
 * a count of associations in partial delivery; only when that count
 * drops to zero can the whole lobby be flushed in one shot.
 */
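/* Returns 1 if the whole pd_lobby was spliced into the socket receive
 * queue, 0 otherwise; callers use this to decide whether to signal
 * sk_data_ready().
 */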
int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
{
	struct sctp_sock *sp = sctp_sk(sk);

	if (atomic_dec_and_test(&sp->pd_mode)) {
		/* This means there are no other associations in PD, so
		 * we can go ahead and clear out the lobby in one shot.
		 */
		if (!skb_queue_empty(&sp->pd_lobby)) {
			skb_queue_splice_tail_init(&sp->pd_lobby,
						   &sk->sk_receive_queue);
			return 1;
		}
	} else {
		/* There are other associations in PD, so we only need to
		 * pull stuff out of the lobby that belongs to the
		 * association that is exiting PD (all of its notifications
		 * are posted here).
		 */
		if (!skb_queue_empty(&sp->pd_lobby) && asoc) {
			struct sk_buff *skb, *tmp;
			struct sctp_ulpevent *event;

			sctp_skb_for_each(skb, &sp->pd_lobby, tmp) {
				event = sctp_skb2event(skb);
				if (event->asoc == asoc) {
					__skb_unlink(skb, &sp->pd_lobby);
					__skb_queue_tail(&sk->sk_receive_queue,
							 skb);
				}
			}
		}
	}

	return 0;
}

/* Set the pd_mode on the socket and ulpq. */
static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)
{
	struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk);

	atomic_inc(&sp->pd_mode);
	ulpq->pd_mode = 1;
}

/* Clear the pd_mode and restart any pending messages waiting for delivery. */
static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
{
	ulpq->pd_mode = 0;
	sctp_ulpq_reasm_drain(ulpq);
	return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
}

/* If the SKB of 'event' is on a list, it is the first such member
 * of that list.
 */
int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
{
	struct sock *sk = ulpq->asoc->base.sk;
	struct sctp_sock *sp = sctp_sk(sk);
	struct sk_buff_head *queue, *skb_list;
	struct sk_buff *skb = sctp_event2skb(event);
	int clear_pd = 0;

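	/* For the first SKB on a list, skb->prev points back at the
	 * sk_buff_head itself, so this cast recovers the temporary list
	 * the caller collected events on (NULL if the SKB is unlisted).
	 */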
	skb_list = (struct sk_buff_head *) skb->prev;

	/* If the socket is just going to throw this away, do not
	 * even try to deliver it.
	 */
	if (sk->sk_shutdown & RCV_SHUTDOWN &&
	    (sk->sk_shutdown & SEND_SHUTDOWN ||
	     !sctp_ulpevent_is_notification(event)))
		goto out_free;

	if (!sctp_ulpevent_is_notification(event)) {
		sk_mark_napi_id(sk, skb);
		sk_incoming_cpu_update(sk);
	}
	/* Check if the user wishes to receive this event. */
	if (!sctp_ulpevent_is_enabled(event, &sp->subscribe))
		goto out_free;

	/* If we are in partial delivery mode, post to the lobby until
	 * partial delivery is cleared, unless, of course, _this_
	 * association is the cause of the partial delivery.
	 */

	if (atomic_read(&sp->pd_mode) == 0) {
		queue = &sk->sk_receive_queue;
	} else {
		if (ulpq->pd_mode) {
			/* If the association is in partial delivery, we
			 * need to finish delivering the partially processed
			 * packet before passing any other data.  This is
			 * because we don't truly support stream interleaving.
			 */
			if ((event->msg_flags & MSG_NOTIFICATION) ||
			    (SCTP_DATA_NOT_FRAG ==
			     (event->msg_flags & SCTP_DATA_FRAG_MASK)))
				queue = &sp->pd_lobby;
			else {
				clear_pd = event->msg_flags & MSG_EOR;
				queue = &sk->sk_receive_queue;
			}
		} else {
			/* If fragment interleave is enabled, we
			 * can queue this to the receive queue instead
			 * of the lobby.
			 */
			if (sp->frag_interleave)
				queue = &sk->sk_receive_queue;
			else
				queue = &sp->pd_lobby;
		}
	}

	/* If we are harvesting multiple skbs they will be
	 * collected on a list.
	 */
	if (skb_list)
		skb_queue_splice_tail_init(skb_list, queue);
	else
		__skb_queue_tail(queue, skb);

	/* Did we just complete partial delivery and need to get
	 * rolling again?  Move pending data to the receive
	 * queue.
	 */
	if (clear_pd)
		sctp_ulpq_clear_pd(ulpq);

	if (queue == &sk->sk_receive_queue && !sp->data_ready_signalled) {
		if (!sock_owned_by_user(sk))
			sp->data_ready_signalled = 1;
		sk->sk_data_ready(sk);
	}
	return 1;

out_free:
	if (skb_list)
		sctp_queue_purge_ulpevents(skb_list);
	else
		sctp_ulpevent_free(event);

	return 0;
}

/* 2nd Level Abstractions */

/* Helper function to store chunks that need to be reassembled. */
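/* TSN comparisons here and below use serial number arithmetic
 * (TSN_lt/TSN_lte), so ordering remains correct when the 32-bit
 * TSN space wraps around.
 */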
static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
				  struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u32 tsn, ctsn;

	tsn = event->tsn;

	/* See if it belongs at the end. */
	pos = skb_peek_tail(&ulpq->reasm);
	if (!pos) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Short circuit just dropping it at the end. */
	cevent = sctp_skb2event(pos);
	ctsn = cevent->tsn;
	if (TSN_lt(ctsn, tsn)) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list.  We store them by TSN. */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		if (TSN_lt(tsn, ctsn))
			break;
	}

	/* Insert before pos. */
	__skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
}

/* Helper function to return an event corresponding to the reassembled
 * datagram.
 * This routine creates a re-assembled skb given the first and last skbs
 * as stored in the reassembly queue.  The skbs may be non-linear if the
 * SCTP payload was fragmented on the way and IP had to reassemble them.
 * We add the rest of the skbs to the first skb's frag_list.
 */
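/* Sketch of the result:
 *
 *	f_frag
 *	  `- frag_list: [f_frag's own frags] -> 2nd frag -> ... -> l_frag
 *
 * with f_frag->len and f_frag->data_len grown to cover the whole message.
 */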
struct sctp_ulpevent *sctp_make_reassembled_event(struct net *net,
						  struct sk_buff_head *queue,
						  struct sk_buff *f_frag,
						  struct sk_buff *l_frag)
{
	struct sk_buff *pos;
	struct sk_buff *new = NULL;
	struct sctp_ulpevent *event;
	struct sk_buff *pnext, *last;
	struct sk_buff *list = skb_shinfo(f_frag)->frag_list;

	/* Store the pointer to the 2nd skb. */
	if (f_frag == l_frag)
		pos = NULL;
	else
		pos = f_frag->next;

	/* Get the last skb in the f_frag's frag_list if present. */
	for (last = list; list; last = list, list = list->next)
		;

	/* Add the list of remaining fragments to the first fragment's
	 * frag_list.
	 */
	if (last)
		last->next = pos;
	else {
		if (skb_cloned(f_frag)) {
			/* This is a cloned skb, we can't just modify
			 * the frag_list.  We need a new skb to do that.
			 * Instead of calling skb_unshare(), we'll do it
			 * ourselves since we need to delay the free.
			 */
			new = skb_copy(f_frag, GFP_ATOMIC);
			if (!new)
				return NULL;	/* try again later */

			sctp_skb_set_owner_r(new, f_frag->sk);

			skb_shinfo(new)->frag_list = pos;
		} else
			skb_shinfo(f_frag)->frag_list = pos;
	}

	/* Remove the first fragment from the reassembly queue. */
	__skb_unlink(f_frag, queue);

	/* If we did unshare, then free the old skb and re-assign. */
	if (new) {
		kfree_skb(f_frag);
		f_frag = new;
	}

	while (pos) {

		pnext = pos->next;

		/* Update the len and data_len fields of the first fragment. */
		f_frag->len += pos->len;
		f_frag->data_len += pos->len;

		/* Remove the fragment from the reassembly queue. */
		__skb_unlink(pos, queue);

		/* Break if we have reached the last fragment. */
		if (pos == l_frag)
			break;
		pos->next = pnext;
		pos = pnext;
	}

	event = sctp_skb2event(f_frag);
	SCTP_INC_STATS(net, SCTP_MIB_REASMUSRMSGS);

	return event;
}


/* Helper function to check if an incoming chunk has filled up the last
 * missing fragment in an SCTP datagram and return the corresponding event.
 */
static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	struct sk_buff *first_frag = NULL;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval = NULL;
	struct sk_buff *pd_first = NULL;
	struct sk_buff *pd_last = NULL;
	size_t pd_len = 0;
	struct sctp_association *asoc;
	u32 pd_point;

	/* Initialized to 0 just to avoid compiler warning message.  Will
	 * never be used with this value.  It is referenced only after it
	 * is set when we find the first fragment of a message.
	 */
	next_tsn = 0;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that complete a datagram.
	 * 'first_frag' and next_tsn are reset when we find a chunk which
	 * is the first fragment of a datagram.  Once these 2 fields are set
	 * we expect to find the remaining middle fragments and the last
	 * fragment in order.  If not, first_frag is reset to NULL and we
	 * start the next pass when we find another first fragment.
	 *
	 * There is a potential to do partial delivery if the user sets the
	 * SCTP_PARTIAL_DELIVERY_POINT option.  Let's count some things here
	 * to see if we can do PD.
	 */
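	/* Example: with TSNs 10 (FIRST), 11 (MIDDLE) and 12 (LAST) queued
	 * in order, first_frag is set at 10, next_tsn advances to 11 and
	 * then 12, and the LAST fragment jumps to 'found' to reassemble
	 * TSNs 10..12 into a single event.
	 */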
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			/* If this "FIRST_FRAG" is the first
			 * element in the queue, then count it towards
			 * possible PD.
			 */
			if (pos == ulpq->reasm.next) {
				pd_first = pos;
				pd_last = pos;
				pd_len = pos->len;
			} else {
				pd_first = NULL;
				pd_last = NULL;
				pd_len = 0;
			}

			first_frag = pos;
			next_tsn = ctsn + 1;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (first_frag && (ctsn == next_tsn)) {
				next_tsn++;
				if (pd_first) {
					pd_last = pos;
					pd_len += pos->len;
				}
			} else
				first_frag = NULL;
			break;

		case SCTP_DATA_LAST_FRAG:
			if (first_frag && (ctsn == next_tsn))
				goto found;
			else
				first_frag = NULL;
			break;
		}
	}

	asoc = ulpq->asoc;
	if (pd_first) {
		/* Make sure we can enter partial delivery.
		 * We can trigger partial delivery only if fragment
		 * interleave is set, or the socket is not already
		 * in partial delivery.
		 */
		if (!sctp_sk(asoc->base.sk)->frag_interleave &&
		    atomic_read(&sctp_sk(asoc->base.sk)->pd_mode))
			goto done;

		cevent = sctp_skb2event(pd_first);
		pd_point = sctp_sk(asoc->base.sk)->pd_point;
		if (pd_point && pd_point <= pd_len) {
			retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
							     &ulpq->reasm,
							     pd_first, pd_last);
			if (retval)
				sctp_ulpq_set_pd(ulpq);
		}
	}
done:
	return retval;
found:
	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm, first_frag, pos);
	if (retval)
		retval->msg_flags |= MSG_EOR;
	goto done;
}

/* Retrieve the next set of fragments of a partial message. */
static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	int is_last;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for the first
	 * sequence of fragmented chunks.
	 */

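	/* In partial delivery the earlier fragments of the message have
	 * already been handed to the user, so a FIRST fragment at the
	 * head of the queue means the continuation has not arrived yet
	 * and nothing can be delivered.
	 */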
	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;
	is_last = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!first_frag)
				return NULL;
			goto done;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else if (next_tsn == ctsn) {
				next_tsn++;
				last_frag = pos;
			} else
				goto done;
			break;
		case SCTP_DATA_LAST_FRAG:
			if (!first_frag)
				first_frag = pos;
			else if (ctsn != next_tsn)
				goto done;
			last_frag = pos;
			is_last = 1;
			goto done;
		default:
			return NULL;
		}
	}

	/* We have the reassembled event.  There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm, first_frag,
					     last_frag);
	if (retval && is_last)
		retval->msg_flags |= MSG_EOR;

	return retval;
}


/* Helper function to reassemble chunks.  Hold chunks on the reasm queue
 * that need reassembling.
 */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *retval = NULL;

	/* Check if this is part of a fragmented message. */
	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
		event->msg_flags |= MSG_EOR;
		return event;
	}

	sctp_ulpq_store_reasm(ulpq, event);
	if (!ulpq->pd_mode)
		retval = sctp_ulpq_retrieve_reassembled(ulpq);
	else {
		__u32 ctsn, ctsnap;

		/* Do not even bother unless this is the next TSN to
		 * be delivered.
		 */
		ctsn = event->tsn;
		ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
		if (TSN_lte(ctsn, ctsnap))
			retval = sctp_ulpq_retrieve_partial(ulpq);
	}

	return retval;
}

/* Retrieve the first part (sequential fragments) for partial delivery. */
static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that start a datagram.
	 */

	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else
				goto done;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				return NULL;
			if (ctsn == next_tsn) {
				next_tsn++;
				last_frag = pos;
			} else
				goto done;
			break;

		case SCTP_DATA_LAST_FRAG:
			if (!first_frag)
				return NULL;
			else
				goto done;
			break;

		default:
			return NULL;
		}
	}

	/* We have the reassembled event.  There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm, first_frag,
					     last_frag);
	return retval;
}

/*
 * Flush out stale fragments from the reassembly queue when processing
 * a Forward TSN.
 *
 * RFC 3758, Section 3.6
 *
 * After receiving and processing a FORWARD TSN, the data receiver MUST
 * take cautions in updating its re-assembly queue.  The receiver MUST
 * remove any partially reassembled message, which is still missing one
 * or more TSNs earlier than or equal to the new cumulative TSN point.
 * In the event that the receiver has invoked the partial delivery API,
 * a notification SHOULD also be generated to inform the upper layer API
 * that the message being partially delivered will NOT be completed.
 */
void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *event;
	__u32 tsn;

	if (skb_queue_empty(&ulpq->reasm))
		return;

	skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
		event = sctp_skb2event(pos);
		tsn = event->tsn;

		/* Since the entire message must be abandoned by the
		 * sender (item A3 in Section 3.5, RFC 3758), we can
		 * free all fragments on the list that are less than
		 * or equal to ctsn_point.
		 */
		if (TSN_lte(tsn, fwd_tsn)) {
			__skb_unlink(pos, &ulpq->reasm);
			sctp_ulpevent_free(event);
		} else
			break;
	}
}

/*
 * Drain the reassembly queue.  If we just cleared partial delivery, it
 * is possible that the reassembly queue will contain already reassembled
 * messages.  Retrieve any such messages and give them to the user.
 */
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
{
	struct sctp_ulpevent *event = NULL;
	struct sk_buff_head temp;

	if (skb_queue_empty(&ulpq->reasm))
		return;

	while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
		/* Do ordering if needed. */
		if (event && (event->msg_flags & MSG_EOR)) {
			skb_queue_head_init(&temp);
			__skb_queue_tail(&temp, sctp_event2skb(event));

			event = sctp_ulpq_order(ulpq, event);
		}

		/* Send event to the ULP.  'event' is the
		 * sctp_ulpevent for the very first SKB on the 'temp' list.
		 */
		if (event)
			sctp_ulpq_tail_event(ulpq, event);
	}
}


/* Helper function to gather skbs that have possibly become
 * ordered by an incoming chunk.
 */
static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
				       struct sctp_ulpevent *event)
{
	struct sk_buff_head *event_list;
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_stream *stream;
	__u16 sid, csid, cssn;

	sid = event->stream;
	stream = &ulpq->asoc->stream;

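	/* Recover the caller's temporary list from skb->prev, as in
	 * sctp_ulpq_tail_event(); newly ordered events are appended to it.
	 */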
	event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;

	/* We are holding the chunks by stream, by SSN. */
	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		/* Have we gone too far? */
		if (csid > sid)
			break;

		/* Have we not gone far enough? */
		if (csid < sid)
			continue;

		if (cssn != sctp_ssn_peek(stream, in, sid))
			break;

		/* Found it, so mark in the stream. */
		sctp_ssn_next(stream, in, sid);

		__skb_unlink(pos, &ulpq->lobby);

		/* Attach all gathered skbs to the event. */
		__skb_queue_tail(event_list, pos);
	}
}

/* Helper function to store chunks needing ordering. */
static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
				    struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u16 sid, csid;
	__u16 ssn, cssn;

	pos = skb_peek_tail(&ulpq->lobby);
	if (!pos) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	sid = event->stream;
	ssn = event->ssn;

	cevent = (struct sctp_ulpevent *) pos->cb;
	csid = cevent->stream;
	cssn = cevent->ssn;
	if (sid > csid) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	if ((sid == csid) && SSN_lt(cssn, ssn)) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list.  We store them by
	 * stream ID and then by SSN.
	 */
	skb_queue_walk(&ulpq->lobby, pos) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (csid > sid)
			break;
		if (csid == sid && SSN_lt(ssn, cssn))
			break;
	}

	/* Insert before pos. */
	__skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
}

static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	__u16 sid, ssn;
	struct sctp_stream *stream;

	/* Check if this message needs ordering. */
	if (event->msg_flags & SCTP_DATA_UNORDERED)
		return event;

	/* Note: The stream ID must be verified before this routine. */
	sid = event->stream;
	ssn = event->ssn;
	stream = &ulpq->asoc->stream;

	/* Is this the expected SSN for this stream ID? */
	if (ssn != sctp_ssn_peek(stream, in, sid)) {
		/* We've received something out of order, so find where it
		 * needs to be placed.  We order by stream and then by SSN.
		 */
		sctp_ulpq_store_ordered(ulpq, event);
		return NULL;
	}

	/* Mark that the next chunk has been found. */
	sctp_ssn_next(stream, in, sid);

	/* Go find any other chunks that were waiting for
	 * ordering.
	 */
	sctp_ulpq_retrieve_ordered(ulpq, event);

	return event;
}

/* Helper function to gather skbs that have possibly become
 * ordered by Forward TSN skipping their dependencies.
 */
static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_ulpevent *event;
	struct sctp_stream *stream;
	struct sk_buff_head temp;
	struct sk_buff_head *lobby = &ulpq->lobby;
	__u16 csid, cssn;

	stream = &ulpq->asoc->stream;

	/* We are holding the chunks by stream, by SSN. */
	skb_queue_head_init(&temp);
	event = NULL;
	sctp_skb_for_each(pos, lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		/* Have we gone too far? */
		if (csid > sid)
			break;

		/* Have we not gone far enough? */
		if (csid < sid)
			continue;

		/* See if this SSN has been marked by skipping. */
		if (!SSN_lt(cssn, sctp_ssn_peek(stream, in, csid)))
			break;

		__skb_unlink(pos, lobby);
		if (!event)
			/* Create a temporary list to collect chunks on. */
			event = sctp_skb2event(pos);

		/* Attach all gathered skbs to the event. */
		__skb_queue_tail(&temp, pos);
	}

	/* If we didn't reap any data, see if the next expected SSN
	 * is next on the queue and if so, use that.
	 */
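	/* When the walk above runs off the end of the queue, 'pos' is left
	 * pointing at the queue head itself, hence the cast in this test.
	 */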
	if (event == NULL && pos != (struct sk_buff *)lobby) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (csid == sid && cssn == sctp_ssn_peek(stream, in, csid)) {
			sctp_ssn_next(stream, in, csid);
			__skb_unlink(pos, lobby);
			__skb_queue_tail(&temp, pos);
			event = sctp_skb2event(pos);
		}
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for the
	 * very first SKB on the 'temp' list.
	 */
	if (event) {
		/* See if we have more ordered data that we can deliver. */
		sctp_ulpq_retrieve_ordered(ulpq, event);
		sctp_ulpq_tail_event(ulpq, event);
	}
}

/* Skip over an SSN.  This is used during the processing of a
 * Forward TSN chunk to skip over the abandoned ordered data.
 */
void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
{
	struct sctp_stream *stream;

	/* Note: The stream ID must be verified before this routine. */
	stream = &ulpq->asoc->stream;

	/* Is this an old SSN?  If so ignore. */
	if (SSN_lt(ssn, sctp_ssn_peek(stream, in, sid)))
		return;

	/* Mark that we are no longer expecting this SSN or lower. */
	sctp_ssn_skip(stream, in, sid, ssn);

	/* Go find any other chunks that were waiting for
	 * ordering and deliver them if needed.
	 */
	sctp_ulpq_reap_ordered(ulpq, sid);
}

__u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq, struct sk_buff_head *list,
			    __u16 needed)
{
	__u16 freed = 0;
	__u32 tsn, last_tsn;
	struct sk_buff *skb, *flist, *last;
	struct sctp_ulpevent *event;
	struct sctp_tsnmap *tsnmap;

	tsnmap = &ulpq->asoc->peer.tsn_map;
	while ((skb = skb_peek_tail(list)) != NULL) {
		event = sctp_skb2event(skb);
		tsn = event->tsn;

		/* Don't renege below the Cumulative TSN ACK Point. */
		if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap)))
			break;

		/* Events in ordering queue may have multiple fragments
		 * corresponding to additional TSNs.  Sum the total
		 * freed space; find the last TSN.
		 */
		freed += skb_headlen(skb);
		flist = skb_shinfo(skb)->frag_list;
		for (last = flist; flist; flist = flist->next) {
			last = flist;
			freed += skb_headlen(last);
		}
		if (last)
			last_tsn = sctp_skb2event(last)->tsn;
		else
			last_tsn = tsn;

		/* Unlink the event, then renege all applicable TSNs. */
		__skb_unlink(skb, list);
		sctp_ulpevent_free(event);
		while (TSN_lte(tsn, last_tsn)) {
			sctp_tsnmap_renege(tsnmap, tsn);
			tsn++;
		}
		if (freed >= needed)
			return freed;
	}

	return freed;
}

/* Renege 'needed' bytes from the ordering queue. */
static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
{
	return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
}

/* Renege 'needed' bytes from the reassembly queue. */
static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
{
	return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed);
}

/* Partially deliver the first message as there is pressure on rwnd. */
void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
				gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sctp_association *asoc;
	struct sctp_sock *sp;
	__u32 ctsn;
	struct sk_buff *skb;

	asoc = ulpq->asoc;
	sp = sctp_sk(asoc->base.sk);

	/* If the association is already in Partial Delivery mode
	 * we have nothing to do.
	 */
	if (ulpq->pd_mode)
		return;

	/* Data must be at or below the Cumulative TSN ACK Point to
	 * start partial delivery.
	 */
	skb = skb_peek(&asoc->ulpq.reasm);
	if (skb != NULL) {
		ctsn = sctp_skb2event(skb)->tsn;
		if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
			return;
	}

	/* If the user enabled fragment interleave socket option,
	 * multiple associations can enter partial delivery.
	 * Otherwise, we can only enter partial delivery if the
	 * socket is not in partial delivery mode.
	 */
	if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) {
		/* Is partial delivery possible? */
		event = sctp_ulpq_retrieve_first(ulpq);
		/* Send event to the ULP. */
		if (event) {
			sctp_ulpq_tail_event(ulpq, event);
			sctp_ulpq_set_pd(ulpq);
			return;
		}
	}
}

/* Renege some packets to make room for an incoming chunk. */
void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
		      gfp_t gfp)
{
	struct sctp_association *asoc = ulpq->asoc;
	__u32 freed = 0;
	__u16 needed;

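	/* 'needed' is the payload size of the incoming chunk: its total
	 * length minus the DATA chunk header.
	 */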
	needed = ntohs(chunk->chunk_hdr->length) -
		 sizeof(struct sctp_data_chunk);

	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
		freed = sctp_ulpq_renege_order(ulpq, needed);
		if (freed < needed)
			freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
	}
	/* If able to free enough room, accept this chunk. */
	if (freed >= needed) {
		int retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);
		/*
		 * Enter partial delivery if the chunk has not been
		 * delivered; otherwise, drain the reassembly queue.
		 */
		if (retval <= 0)
			sctp_ulpq_partial_delivery(ulpq, gfp);
		else if (retval == 1)
			sctp_ulpq_reasm_drain(ulpq);
	}

	sk_mem_reclaim(asoc->base.sk);
}


/* Notify the application if an association is aborted and in
 * partial delivery mode.  Send up any pending received messages.
 */
void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_ulpevent *ev = NULL;
	struct sock *sk;
	struct sctp_sock *sp;

	if (!ulpq->pd_mode)
		return;

	sk = ulpq->asoc->base.sk;
	sp = sctp_sk(sk);
	if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
				       &sctp_sk(sk)->subscribe))
		ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
					      SCTP_PARTIAL_DELIVERY_ABORTED,
					      0, 0, 0, gfp);
	if (ev)
		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));

	/* If there is data waiting, send it up the socket now. */
	if ((sctp_ulpq_clear_pd(ulpq) || ev) && !sp->data_ready_signalled) {
		sp->data_ready_signalled = 1;
		sk->sk_data_ready(sk);
	}
}
1/* SCTP kernel implementation
2 * (C) Copyright IBM Corp. 2001, 2004
3 * Copyright (c) 1999-2000 Cisco, Inc.
4 * Copyright (c) 1999-2001 Motorola, Inc.
5 * Copyright (c) 2001 Intel Corp.
6 * Copyright (c) 2001 Nokia, Inc.
7 * Copyright (c) 2001 La Monte H.P. Yarroll
8 *
9 * This abstraction carries sctp events to the ULP (sockets).
10 *
11 * This SCTP implementation is free software;
12 * you can redistribute it and/or modify it under the terms of
13 * the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This SCTP implementation is distributed in the hope that it
18 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
19 * ************************
20 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
21 * See the GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with GNU CC; see the file COPYING. If not, see
25 * <http://www.gnu.org/licenses/>.
26 *
27 * Please send any bug reports or fixes you make to the
28 * email address(es):
29 * lksctp developers <linux-sctp@vger.kernel.org>
30 *
31 * Written or modified by:
32 * Jon Grimm <jgrimm@us.ibm.com>
33 * La Monte H.P. Yarroll <piggy@acm.org>
34 * Sridhar Samudrala <sri@us.ibm.com>
35 */
36
37#include <linux/slab.h>
38#include <linux/types.h>
39#include <linux/skbuff.h>
40#include <net/sock.h>
41#include <net/busy_poll.h>
42#include <net/sctp/structs.h>
43#include <net/sctp/sctp.h>
44#include <net/sctp/sm.h>
45
46/* Forward declarations for internal helpers. */
47static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
48 struct sctp_ulpevent *);
49static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *,
50 struct sctp_ulpevent *);
51static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);
52
53/* 1st Level Abstractions */
54
55/* Initialize a ULP queue from a block of memory. */
56struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
57 struct sctp_association *asoc)
58{
59 memset(ulpq, 0, sizeof(struct sctp_ulpq));
60
61 ulpq->asoc = asoc;
62 skb_queue_head_init(&ulpq->reasm);
63 skb_queue_head_init(&ulpq->lobby);
64 ulpq->pd_mode = 0;
65
66 return ulpq;
67}
68
69
70/* Flush the reassembly and ordering queues. */
71void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
72{
73 struct sk_buff *skb;
74 struct sctp_ulpevent *event;
75
76 while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
77 event = sctp_skb2event(skb);
78 sctp_ulpevent_free(event);
79 }
80
81 while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
82 event = sctp_skb2event(skb);
83 sctp_ulpevent_free(event);
84 }
85
86}
87
88/* Dispose of a ulpqueue. */
89void sctp_ulpq_free(struct sctp_ulpq *ulpq)
90{
91 sctp_ulpq_flush(ulpq);
92}
93
94/* Process an incoming DATA chunk. */
95int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
96 gfp_t gfp)
97{
98 struct sk_buff_head temp;
99 struct sctp_ulpevent *event;
100 int event_eor = 0;
101
102 /* Create an event from the incoming chunk. */
103 event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
104 if (!event)
105 return -ENOMEM;
106
107 /* Do reassembly if needed. */
108 event = sctp_ulpq_reasm(ulpq, event);
109
110 /* Do ordering if needed. */
111 if ((event) && (event->msg_flags & MSG_EOR)) {
112 /* Create a temporary list to collect chunks on. */
113 skb_queue_head_init(&temp);
114 __skb_queue_tail(&temp, sctp_event2skb(event));
115
116 event = sctp_ulpq_order(ulpq, event);
117 }
118
119 /* Send event to the ULP. 'event' is the sctp_ulpevent for
120 * very first SKB on the 'temp' list.
121 */
122 if (event) {
123 event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
124 sctp_ulpq_tail_event(ulpq, event);
125 }
126
127 return event_eor;
128}
129
130/* Add a new event for propagation to the ULP. */
131/* Clear the partial delivery mode for this socket. Note: This
132 * assumes that no association is currently in partial delivery mode.
133 */
134int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
135{
136 struct sctp_sock *sp = sctp_sk(sk);
137
138 if (atomic_dec_and_test(&sp->pd_mode)) {
139 /* This means there are no other associations in PD, so
140 * we can go ahead and clear out the lobby in one shot
141 */
142 if (!skb_queue_empty(&sp->pd_lobby)) {
143 struct list_head *list;
144 sctp_skb_list_tail(&sp->pd_lobby, &sk->sk_receive_queue);
145 list = (struct list_head *)&sctp_sk(sk)->pd_lobby;
146 INIT_LIST_HEAD(list);
147 return 1;
148 }
149 } else {
150 /* There are other associations in PD, so we only need to
151 * pull stuff out of the lobby that belongs to the
152 * associations that is exiting PD (all of its notifications
153 * are posted here).
154 */
155 if (!skb_queue_empty(&sp->pd_lobby) && asoc) {
156 struct sk_buff *skb, *tmp;
157 struct sctp_ulpevent *event;
158
159 sctp_skb_for_each(skb, &sp->pd_lobby, tmp) {
160 event = sctp_skb2event(skb);
161 if (event->asoc == asoc) {
162 __skb_unlink(skb, &sp->pd_lobby);
163 __skb_queue_tail(&sk->sk_receive_queue,
164 skb);
165 }
166 }
167 }
168 }
169
170 return 0;
171}
172
173/* Set the pd_mode on the socket and ulpq */
174static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)
175{
176 struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk);
177
178 atomic_inc(&sp->pd_mode);
179 ulpq->pd_mode = 1;
180}
181
182/* Clear the pd_mode and restart any pending messages waiting for delivery. */
183static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
184{
185 ulpq->pd_mode = 0;
186 sctp_ulpq_reasm_drain(ulpq);
187 return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
188}
189
190/* If the SKB of 'event' is on a list, it is the first such member
191 * of that list.
192 */
193int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
194{
195 struct sock *sk = ulpq->asoc->base.sk;
196 struct sk_buff_head *queue, *skb_list;
197 struct sk_buff *skb = sctp_event2skb(event);
198 int clear_pd = 0;
199
200 skb_list = (struct sk_buff_head *) skb->prev;
201
202 /* If the socket is just going to throw this away, do not
203 * even try to deliver it.
204 */
205 if (sock_flag(sk, SOCK_DEAD) || (sk->sk_shutdown & RCV_SHUTDOWN))
206 goto out_free;
207
208 if (!sctp_ulpevent_is_notification(event)) {
209 sk_mark_napi_id(sk, skb);
210 sk_incoming_cpu_update(sk);
211 }
212 /* Check if the user wishes to receive this event. */
213 if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe))
214 goto out_free;
215
216 /* If we are in partial delivery mode, post to the lobby until
217 * partial delivery is cleared, unless, of course _this_ is
218 * the association the cause of the partial delivery.
219 */
220
221 if (atomic_read(&sctp_sk(sk)->pd_mode) == 0) {
222 queue = &sk->sk_receive_queue;
223 } else {
224 if (ulpq->pd_mode) {
225 /* If the association is in partial delivery, we
226 * need to finish delivering the partially processed
227 * packet before passing any other data. This is
228 * because we don't truly support stream interleaving.
229 */
230 if ((event->msg_flags & MSG_NOTIFICATION) ||
231 (SCTP_DATA_NOT_FRAG ==
232 (event->msg_flags & SCTP_DATA_FRAG_MASK)))
233 queue = &sctp_sk(sk)->pd_lobby;
234 else {
235 clear_pd = event->msg_flags & MSG_EOR;
236 queue = &sk->sk_receive_queue;
237 }
238 } else {
239 /*
240 * If fragment interleave is enabled, we
241 * can queue this to the receive queue instead
242 * of the lobby.
243 */
244 if (sctp_sk(sk)->frag_interleave)
245 queue = &sk->sk_receive_queue;
246 else
247 queue = &sctp_sk(sk)->pd_lobby;
248 }
249 }
250
251 /* If we are harvesting multiple skbs they will be
252 * collected on a list.
253 */
254 if (skb_list)
255 sctp_skb_list_tail(skb_list, queue);
256 else
257 __skb_queue_tail(queue, skb);
258
259 /* Did we just complete partial delivery and need to get
260 * rolling again? Move pending data to the receive
261 * queue.
262 */
263 if (clear_pd)
264 sctp_ulpq_clear_pd(ulpq);
265
266 if (queue == &sk->sk_receive_queue)
267 sk->sk_data_ready(sk);
268 return 1;
269
270out_free:
271 if (skb_list)
272 sctp_queue_purge_ulpevents(skb_list);
273 else
274 sctp_ulpevent_free(event);
275
276 return 0;
277}
278
279/* 2nd Level Abstractions */
280
281/* Helper function to store chunks that need to be reassembled. */
282static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
283 struct sctp_ulpevent *event)
284{
285 struct sk_buff *pos;
286 struct sctp_ulpevent *cevent;
287 __u32 tsn, ctsn;
288
289 tsn = event->tsn;
290
291 /* See if it belongs at the end. */
292 pos = skb_peek_tail(&ulpq->reasm);
293 if (!pos) {
294 __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
295 return;
296 }
297
298 /* Short circuit just dropping it at the end. */
299 cevent = sctp_skb2event(pos);
300 ctsn = cevent->tsn;
301 if (TSN_lt(ctsn, tsn)) {
302 __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
303 return;
304 }
305
306 /* Find the right place in this list. We store them by TSN. */
307 skb_queue_walk(&ulpq->reasm, pos) {
308 cevent = sctp_skb2event(pos);
309 ctsn = cevent->tsn;
310
311 if (TSN_lt(tsn, ctsn))
312 break;
313 }
314
315 /* Insert before pos. */
316 __skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
317
318}
319
320/* Helper function to return an event corresponding to the reassembled
321 * datagram.
322 * This routine creates a re-assembled skb given the first and last skb's
323 * as stored in the reassembly queue. The skb's may be non-linear if the sctp
324 * payload was fragmented on the way and ip had to reassemble them.
325 * We add the rest of skb's to the first skb's fraglist.
326 */
327static struct sctp_ulpevent *sctp_make_reassembled_event(struct net *net,
328 struct sk_buff_head *queue, struct sk_buff *f_frag,
329 struct sk_buff *l_frag)
330{
331 struct sk_buff *pos;
332 struct sk_buff *new = NULL;
333 struct sctp_ulpevent *event;
334 struct sk_buff *pnext, *last;
335 struct sk_buff *list = skb_shinfo(f_frag)->frag_list;
336
337 /* Store the pointer to the 2nd skb */
338 if (f_frag == l_frag)
339 pos = NULL;
340 else
341 pos = f_frag->next;
342
343 /* Get the last skb in the f_frag's frag_list if present. */
344 for (last = list; list; last = list, list = list->next)
345 ;
346
347 /* Add the list of remaining fragments to the first fragments
348 * frag_list.
349 */
350 if (last)
351 last->next = pos;
352 else {
353 if (skb_cloned(f_frag)) {
354 /* This is a cloned skb, we can't just modify
355 * the frag_list. We need a new skb to do that.
356 * Instead of calling skb_unshare(), we'll do it
357 * ourselves since we need to delay the free.
358 */
359 new = skb_copy(f_frag, GFP_ATOMIC);
360 if (!new)
361 return NULL; /* try again later */
362
363 sctp_skb_set_owner_r(new, f_frag->sk);
364
365 skb_shinfo(new)->frag_list = pos;
366 } else
367 skb_shinfo(f_frag)->frag_list = pos;
368 }
369
370 /* Remove the first fragment from the reassembly queue. */
371 __skb_unlink(f_frag, queue);
372
373 /* if we did unshare, then free the old skb and re-assign */
374 if (new) {
375 kfree_skb(f_frag);
376 f_frag = new;
377 }
378
379 while (pos) {
380
381 pnext = pos->next;
382
383 /* Update the len and data_len fields of the first fragment. */
384 f_frag->len += pos->len;
385 f_frag->data_len += pos->len;
386
387 /* Remove the fragment from the reassembly queue. */
388 __skb_unlink(pos, queue);
389
390 /* Break if we have reached the last fragment. */
391 if (pos == l_frag)
392 break;
393 pos->next = pnext;
394 pos = pnext;
395 }
396
397 event = sctp_skb2event(f_frag);
398 SCTP_INC_STATS(net, SCTP_MIB_REASMUSRMSGS);
399
400 return event;
401}
402
403
404/* Helper function to check if an incoming chunk has filled up the last
405 * missing fragment in a SCTP datagram and return the corresponding event.
406 */
407static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
408{
409 struct sk_buff *pos;
410 struct sctp_ulpevent *cevent;
411 struct sk_buff *first_frag = NULL;
412 __u32 ctsn, next_tsn;
413 struct sctp_ulpevent *retval = NULL;
414 struct sk_buff *pd_first = NULL;
415 struct sk_buff *pd_last = NULL;
416 size_t pd_len = 0;
417 struct sctp_association *asoc;
418 u32 pd_point;
419
420 /* Initialized to 0 just to avoid compiler warning message. Will
421 * never be used with this value. It is referenced only after it
422 * is set when we find the first fragment of a message.
423 */
424 next_tsn = 0;
425
426 /* The chunks are held in the reasm queue sorted by TSN.
427 * Walk through the queue sequentially and look for a sequence of
428 * fragmented chunks that complete a datagram.
429 * 'first_frag' and next_tsn are reset when we find a chunk which
430 * is the first fragment of a datagram. Once these 2 fields are set
431 * we expect to find the remaining middle fragments and the last
432 * fragment in order. If not, first_frag is reset to NULL and we
433 * start the next pass when we find another first fragment.
434 *
435 * There is a potential to do partial delivery if user sets
436 * SCTP_PARTIAL_DELIVERY_POINT option. Lets count some things here
437 * to see if can do PD.
438 */
439 skb_queue_walk(&ulpq->reasm, pos) {
440 cevent = sctp_skb2event(pos);
441 ctsn = cevent->tsn;
442
443 switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
444 case SCTP_DATA_FIRST_FRAG:
445 /* If this "FIRST_FRAG" is the first
446 * element in the queue, then count it towards
447 * possible PD.
448 */
449 if (pos == ulpq->reasm.next) {
450 pd_first = pos;
451 pd_last = pos;
452 pd_len = pos->len;
453 } else {
454 pd_first = NULL;
455 pd_last = NULL;
456 pd_len = 0;
457 }
458
459 first_frag = pos;
460 next_tsn = ctsn + 1;
461 break;
462
463 case SCTP_DATA_MIDDLE_FRAG:
464 if ((first_frag) && (ctsn == next_tsn)) {
465 next_tsn++;
466 if (pd_first) {
467 pd_last = pos;
468 pd_len += pos->len;
469 }
470 } else
471 first_frag = NULL;
472 break;
473
474 case SCTP_DATA_LAST_FRAG:
475 if (first_frag && (ctsn == next_tsn))
476 goto found;
477 else
478 first_frag = NULL;
479 break;
480 }
481 }
482
483 asoc = ulpq->asoc;
484 if (pd_first) {
485 /* Make sure we can enter partial deliver.
486 * We can trigger partial delivery only if framgent
487 * interleave is set, or the socket is not already
488 * in partial delivery.
489 */
490 if (!sctp_sk(asoc->base.sk)->frag_interleave &&
491 atomic_read(&sctp_sk(asoc->base.sk)->pd_mode))
492 goto done;
493
494 cevent = sctp_skb2event(pd_first);
495 pd_point = sctp_sk(asoc->base.sk)->pd_point;
496 if (pd_point && pd_point <= pd_len) {
497 retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
498 &ulpq->reasm,
499 pd_first,
500 pd_last);
501 if (retval)
502 sctp_ulpq_set_pd(ulpq);
503 }
504 }
505done:
506 return retval;
507found:
508 retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
509 &ulpq->reasm, first_frag, pos);
510 if (retval)
511 retval->msg_flags |= MSG_EOR;
512 goto done;
513}
514
515/* Retrieve the next set of fragments of a partial message. */
516static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
517{
518 struct sk_buff *pos, *last_frag, *first_frag;
519 struct sctp_ulpevent *cevent;
520 __u32 ctsn, next_tsn;
521 int is_last;
522 struct sctp_ulpevent *retval;
523
524 /* The chunks are held in the reasm queue sorted by TSN.
525 * Walk through the queue sequentially and look for the first
526 * sequence of fragmented chunks.
527 */
528
529 if (skb_queue_empty(&ulpq->reasm))
530 return NULL;
531
532 last_frag = first_frag = NULL;
533 retval = NULL;
534 next_tsn = 0;
535 is_last = 0;
536
537 skb_queue_walk(&ulpq->reasm, pos) {
538 cevent = sctp_skb2event(pos);
539 ctsn = cevent->tsn;
540
541 switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
542 case SCTP_DATA_FIRST_FRAG:
543 if (!first_frag)
544 return NULL;
545 goto done;
546 case SCTP_DATA_MIDDLE_FRAG:
547 if (!first_frag) {
548 first_frag = pos;
549 next_tsn = ctsn + 1;
550 last_frag = pos;
551 } else if (next_tsn == ctsn) {
552 next_tsn++;
553 last_frag = pos;
554 } else
555 goto done;
556 break;
557 case SCTP_DATA_LAST_FRAG:
558 if (!first_frag)
559 first_frag = pos;
560 else if (ctsn != next_tsn)
561 goto done;
562 last_frag = pos;
563 is_last = 1;
564 goto done;
565 default:
566 return NULL;
567 }
568 }
569
570 /* We have the reassembled event. There is no need to look
571 * further.
572 */
573done:
574 retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
575 &ulpq->reasm, first_frag, last_frag);
576 if (retval && is_last)
577 retval->msg_flags |= MSG_EOR;
578
579 return retval;
580}
581
582
583/* Helper function to reassemble chunks. Hold chunks on the reasm queue that
584 * need reassembling.
585 */
586static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
587 struct sctp_ulpevent *event)
588{
589 struct sctp_ulpevent *retval = NULL;
590
591 /* Check if this is part of a fragmented message. */
592 if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
593 event->msg_flags |= MSG_EOR;
594 return event;
595 }
596
597 sctp_ulpq_store_reasm(ulpq, event);
598 if (!ulpq->pd_mode)
599 retval = sctp_ulpq_retrieve_reassembled(ulpq);
600 else {
601 __u32 ctsn, ctsnap;
602
603 /* Do not even bother unless this is the next tsn to
604 * be delivered.
605 */
606 ctsn = event->tsn;
607 ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
608 if (TSN_lte(ctsn, ctsnap))
609 retval = sctp_ulpq_retrieve_partial(ulpq);
610 }
611
612 return retval;
613}
614
615/* Retrieve the first part (sequential fragments) for partial delivery. */
616static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
617{
618 struct sk_buff *pos, *last_frag, *first_frag;
619 struct sctp_ulpevent *cevent;
620 __u32 ctsn, next_tsn;
621 struct sctp_ulpevent *retval;
622
623 /* The chunks are held in the reasm queue sorted by TSN.
624 * Walk through the queue sequentially and look for a sequence of
625 * fragmented chunks that start a datagram.
626 */
627
628 if (skb_queue_empty(&ulpq->reasm))
629 return NULL;
630
631 last_frag = first_frag = NULL;
632 retval = NULL;
633 next_tsn = 0;
634
635 skb_queue_walk(&ulpq->reasm, pos) {
636 cevent = sctp_skb2event(pos);
637 ctsn = cevent->tsn;
638
639 switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
640 case SCTP_DATA_FIRST_FRAG:
641 if (!first_frag) {
642 first_frag = pos;
643 next_tsn = ctsn + 1;
644 last_frag = pos;
645 } else
646 goto done;
647 break;
648
649 case SCTP_DATA_MIDDLE_FRAG:
650 if (!first_frag)
651 return NULL;
652 if (ctsn == next_tsn) {
653 next_tsn++;
654 last_frag = pos;
655 } else
656 goto done;
657 break;
658
659 case SCTP_DATA_LAST_FRAG:
660 if (!first_frag)
661 return NULL;
662 else
663 goto done;
664 break;
665
666 default:
667 return NULL;
668 }
669 }
670
671 /* We have the reassembled event. There is no need to look
672 * further.
673 */
674done:
675 retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
676 &ulpq->reasm, first_frag, last_frag);
677 return retval;
678}
679
680/*
681 * Flush out stale fragments from the reassembly queue when processing
682 * a Forward TSN.
683 *
684 * RFC 3758, Section 3.6
685 *
686 * After receiving and processing a FORWARD TSN, the data receiver MUST
687 * take cautions in updating its re-assembly queue. The receiver MUST
688 * remove any partially reassembled message, which is still missing one
689 * or more TSNs earlier than or equal to the new cumulative TSN point.
690 * In the event that the receiver has invoked the partial delivery API,
691 * a notification SHOULD also be generated to inform the upper layer API
692 * that the message being partially delivered will NOT be completed.
693 */
694void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
695{
696 struct sk_buff *pos, *tmp;
697 struct sctp_ulpevent *event;
698 __u32 tsn;
699
700 if (skb_queue_empty(&ulpq->reasm))
701 return;
702
703 skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
704 event = sctp_skb2event(pos);
705 tsn = event->tsn;
706
707 /* Since the entire message must be abandoned by the
708 * sender (item A3 in Section 3.5, RFC 3758), we can
709 * free all fragments on the list that are less then
710 * or equal to ctsn_point
711 */
712 if (TSN_lte(tsn, fwd_tsn)) {
713 __skb_unlink(pos, &ulpq->reasm);
714 sctp_ulpevent_free(event);
715 } else
716 break;
717 }
718}
719
720/*
721 * Drain the reassembly queue. If we just cleared parted delivery, it
722 * is possible that the reassembly queue will contain already reassembled
723 * messages. Retrieve any such messages and give them to the user.
724 */
725static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
726{
727 struct sctp_ulpevent *event = NULL;
728 struct sk_buff_head temp;
729
730 if (skb_queue_empty(&ulpq->reasm))
731 return;
732
733 while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
734 /* Do ordering if needed. */
735 if ((event) && (event->msg_flags & MSG_EOR)) {
736 skb_queue_head_init(&temp);
737 __skb_queue_tail(&temp, sctp_event2skb(event));
738
739 event = sctp_ulpq_order(ulpq, event);
740 }
741
742 /* Send event to the ULP. 'event' is the
743 * sctp_ulpevent for very first SKB on the temp' list.
744 */
745 if (event)
746 sctp_ulpq_tail_event(ulpq, event);
747 }
748}
749
750
751/* Helper function to gather skbs that have possibly become
752 * ordered by an an incoming chunk.
753 */
754static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
755 struct sctp_ulpevent *event)
756{
757 struct sk_buff_head *event_list;
758 struct sk_buff *pos, *tmp;
759 struct sctp_ulpevent *cevent;
760 struct sctp_stream *in;
761 __u16 sid, csid, cssn;
762
763 sid = event->stream;
764 in = &ulpq->asoc->ssnmap->in;
765
766 event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;
767
768 /* We are holding the chunks by stream, by SSN. */
769 sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
770 cevent = (struct sctp_ulpevent *) pos->cb;
771 csid = cevent->stream;
772 cssn = cevent->ssn;
773
774 /* Have we gone too far? */
775 if (csid > sid)
776 break;
777
778 /* Have we not gone far enough? */
779 if (csid < sid)
780 continue;
781
782 if (cssn != sctp_ssn_peek(in, sid))
783 break;
784
785 /* Found it, so mark in the ssnmap. */
786 sctp_ssn_next(in, sid);
787
788 __skb_unlink(pos, &ulpq->lobby);
789
790 /* Attach all gathered skbs to the event. */
791 __skb_queue_tail(event_list, pos);
792 }
793}
794
795/* Helper function to store chunks needing ordering. */
796static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
797 struct sctp_ulpevent *event)
798{
799 struct sk_buff *pos;
800 struct sctp_ulpevent *cevent;
801 __u16 sid, csid;
802 __u16 ssn, cssn;
803
804 pos = skb_peek_tail(&ulpq->lobby);
805 if (!pos) {
806 __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
807 return;
808 }
809
810 sid = event->stream;
811 ssn = event->ssn;
812
813 cevent = (struct sctp_ulpevent *) pos->cb;
814 csid = cevent->stream;
815 cssn = cevent->ssn;
816 if (sid > csid) {
817 __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
818 return;
819 }
820
821 if ((sid == csid) && SSN_lt(cssn, ssn)) {
822 __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
823 return;
824 }
825
826 /* Find the right place in this list. We store them by
827 * stream ID and then by SSN.
828 */
829 skb_queue_walk(&ulpq->lobby, pos) {
830 cevent = (struct sctp_ulpevent *) pos->cb;
831 csid = cevent->stream;
832 cssn = cevent->ssn;
833
834 if (csid > sid)
835 break;
836 if (csid == sid && SSN_lt(ssn, cssn))
837 break;
838 }
839
840
841 /* Insert before pos. */
842 __skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
843}

static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	__u16 sid, ssn;
	struct sctp_stream *in;

	/* Check if this message needs ordering. */
	if (SCTP_DATA_UNORDERED & event->msg_flags)
		return event;

	/* Note: The stream ID must be verified before this routine. */
	sid = event->stream;
	ssn = event->ssn;
	in = &ulpq->asoc->ssnmap->in;

	/* Is this the expected SSN for this stream ID? */
	if (ssn != sctp_ssn_peek(in, sid)) {
		/* We've received something out of order, so find where it
		 * needs to be placed.  We order by stream and then by SSN.
		 */
		sctp_ulpq_store_ordered(ulpq, event);
		return NULL;
	}

	/* Mark that the next chunk has been found. */
	sctp_ssn_next(in, sid);

	/* Go find any other chunks that were waiting for
	 * ordering.
	 */
	sctp_ulpq_retrieve_ordered(ulpq, event);

	return event;
}
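
/* In short: sctp_ulpq_order() returns the event when it, plus anything
 * it unblocked in the lobby, may be delivered now; it returns NULL
 * after parking an early arrival, in which case the caller delivers
 * nothing.  Unordered data (SCTP_DATA_UNORDERED) bypasses SSN tracking
 * entirely.
 */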

/* Helper function to gather skbs that have possibly become
 * ordered because a FORWARD TSN has skipped their dependencies.
 */
static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_ulpevent *event;
	struct sctp_stream *in;
	struct sk_buff_head temp;
	struct sk_buff_head *lobby = &ulpq->lobby;
	__u16 csid, cssn;

	in = &ulpq->asoc->ssnmap->in;

	/* We are holding the chunks by stream, by SSN. */
	skb_queue_head_init(&temp);
	event = NULL;
	sctp_skb_for_each(pos, lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		/* Have we gone too far? */
		if (csid > sid)
			break;

		/* Have we not gone far enough? */
		if (csid < sid)
			continue;

		/* See if this SSN has been marked by skipping. */
		if (!SSN_lt(cssn, sctp_ssn_peek(in, csid)))
			break;

		__skb_unlink(pos, lobby);
		if (!event)
			/* Create a temporary list to collect chunks on. */
			event = sctp_skb2event(pos);

		/* Attach all gathered skbs to the event. */
		__skb_queue_tail(&temp, pos);
	}

	/* If we didn't reap any data, see if the next expected SSN
	 * is next on the queue and if so, use that.
	 */
	if (event == NULL && pos != (struct sk_buff *)lobby) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (csid == sid && cssn == sctp_ssn_peek(in, csid)) {
			sctp_ssn_next(in, csid);
			__skb_unlink(pos, lobby);
			__skb_queue_tail(&temp, pos);
			event = sctp_skb2event(pos);
		}
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for
	 * the very first SKB on the 'temp' list.
	 */
	if (event) {
		/* See if we have more ordered data that we can deliver. */
		sctp_ulpq_retrieve_ordered(ulpq, event);
		sctp_ulpq_tail_event(ulpq, event);
	}
}

/* Skip over an SSN.  This is used during the processing of a
 * FORWARD TSN chunk to skip over the abandoned ordered data.
 */
void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
{
	struct sctp_stream *in;

	/* Note: The stream ID must be verified before this routine. */
	in = &ulpq->asoc->ssnmap->in;

	/* Is this an old SSN?  If so ignore. */
	if (SSN_lt(ssn, sctp_ssn_peek(in, sid)))
		return;

	/* Mark that we are no longer expecting this SSN or lower. */
	sctp_ssn_skip(in, sid, ssn);

	/* Go find any other chunks that were waiting for
	 * ordering and deliver them if needed.
	 */
	sctp_ulpq_reap_ordered(ulpq, sid);
}
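
/* Worked example (hypothetical values): stream 2 expects SSN 4 and the
 * lobby holds (2, 5) and (2, 7).  A FORWARD TSN abandoning ordered
 * data up to SSN 6 leads to sctp_ulpq_skip(ulpq, 2, 6), after which
 * the stream expects SSN 7.  sctp_ulpq_reap_ordered() then reaps
 * (2, 5), now below the expectation, and sctp_ulpq_retrieve_ordered()
 * gathers (2, 7), which matches it exactly, so both reach the ULP.
 */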

static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
				   struct sk_buff_head *list, __u16 needed)
{
	__u16 freed = 0;
	__u32 tsn, last_tsn;
	struct sk_buff *skb, *flist, *last;
	struct sctp_ulpevent *event;
	struct sctp_tsnmap *tsnmap;

	tsnmap = &ulpq->asoc->peer.tsn_map;

	/* Walk from the tail, i.e. from the newest data, so that the
	 * oldest, most nearly deliverable events are reneged last.
	 */
	while ((skb = skb_peek_tail(list)) != NULL) {
		event = sctp_skb2event(skb);
		tsn = event->tsn;

		/* Don't renege below the Cumulative TSN ACK Point. */
		if (TSN_lte(tsn, sctp_tsnmap_get_ctsn(tsnmap)))
			break;

		/* Events in the ordering queue may have multiple fragments
		 * corresponding to additional TSNs.  Sum the total
		 * freed space; find the last TSN.
		 */
		freed += skb_headlen(skb);
		flist = skb_shinfo(skb)->frag_list;
		for (last = flist; flist; flist = flist->next) {
			last = flist;
			freed += skb_headlen(last);
		}
		if (last)
			last_tsn = sctp_skb2event(last)->tsn;
		else
			last_tsn = tsn;

		/* Unlink the event, then renege all applicable TSNs. */
		__skb_unlink(skb, list);
		sctp_ulpevent_free(event);
		while (TSN_lte(tsn, last_tsn)) {
			sctp_tsnmap_renege(tsnmap, tsn);
			tsn++;
		}
		if (freed >= needed)
			return freed;
	}

	return freed;
}
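
/* Reneging revokes data that was previously gap-acked but not yet
 * delivered: each TSN passed to sctp_tsnmap_renege() is cleared from
 * the map, so the peer will see it as missing again and eventually
 * retransmit it.  This is only legal above the Cumulative TSN ACK
 * Point, which is why the loop above stops there.
 */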

/* Renege 'needed' bytes from the ordering queue. */
static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
{
	return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
}

/* Renege 'needed' bytes from the reassembly queue. */
static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
{
	return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed);
}

/* Partially deliver the first message as there is pressure on rwnd. */
void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
				gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sctp_association *asoc;
	struct sctp_sock *sp;
	__u32 ctsn;
	struct sk_buff *skb;

	asoc = ulpq->asoc;
	sp = sctp_sk(asoc->base.sk);

	/* If the association is already in Partial Delivery mode
	 * we have nothing to do.
	 */
	if (ulpq->pd_mode)
		return;

	/* Data must be at or below the Cumulative TSN ACK Point to
	 * start partial delivery.
	 */
	skb = skb_peek(&asoc->ulpq.reasm);
	if (skb != NULL) {
		ctsn = sctp_skb2event(skb)->tsn;
		if (!TSN_lte(ctsn, sctp_tsnmap_get_ctsn(&asoc->peer.tsn_map)))
			return;
	}

	/* If the user enabled the fragment interleave socket option,
	 * multiple associations can enter partial delivery.
	 * Otherwise, we can only enter partial delivery if the
	 * socket is not already in partial delivery mode.
	 */
	if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) {
		/* Is partial delivery possible? */
		event = sctp_ulpq_retrieve_first(ulpq);
		/* Send event to the ULP. */
		if (event) {
			sctp_ulpq_tail_event(ulpq, event);
			sctp_ulpq_set_pd(ulpq);
			return;
		}
	}
}
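
/* The idea, briefly: partial delivery releases the leading fragments
 * of a still-incomplete message to the ULP so that rwnd can drain
 * without reneging acked data.  sctp_ulpq_set_pd() marks this queue
 * (and counts the socket) as being in partial delivery mode;
 * sctp_ulpq_clear_pd() undoes that once the message completes or the
 * association aborts (see sctp_ulpq_abort_pd() below).
 */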

/* Renege some packets to make room for an incoming chunk. */
void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
		      gfp_t gfp)
{
	struct sctp_association *asoc;
	__u16 needed, freed;

	asoc = ulpq->asoc;

	/* How much room does the incoming chunk's payload need? */
	if (chunk) {
		needed = ntohs(chunk->chunk_hdr->length);
		needed -= sizeof(sctp_data_chunk_t);
	} else {
		needed = SCTP_DEFAULT_MAXWINDOW;
	}

	freed = 0;

	/* Only renege if nothing is already queued to the user;
	 * take from the ordering queue first, then from the
	 * reassembly queue.
	 */
	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
		freed = sctp_ulpq_renege_order(ulpq, needed);
		if (freed < needed)
			freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
	}
	/* If able to free enough room, accept this chunk. */
	if (chunk && (freed >= needed)) {
		int retval = sctp_ulpq_tail_data(ulpq, chunk, gfp);

		/* Enter partial delivery if the chunk has not been
		 * delivered; otherwise, drain the reassembly queue.
		 */
		if (retval <= 0)
			sctp_ulpq_partial_delivery(ulpq, gfp);
		else if (retval == 1)
			sctp_ulpq_reasm_drain(ulpq);
	}

	sk_mem_reclaim(asoc->base.sk);
}
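
/* Putting it together: when an incoming DATA chunk does not fit in
 * the receive buffer, sctp_ulpq_renege() reclaims at least the
 * chunk's payload size from undelivered out-of-order data, then lets
 * the new chunk take its place.  If that chunk still does not
 * complete a message, partial delivery pushes what has arrived so far
 * up to the ULP instead.
 */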

/* Notify the application if an association is aborted and in
 * partial delivery mode.  Send up any pending received messages.
 */
void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_ulpevent *ev = NULL;
	struct sock *sk;

	if (!ulpq->pd_mode)
		return;

	sk = ulpq->asoc->base.sk;
	if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
				       &sctp_sk(sk)->subscribe))
		ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
					      SCTP_PARTIAL_DELIVERY_ABORTED,
					      gfp);
	if (ev)
		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));

	/* If there is data waiting, send it up the socket now. */
	if (sctp_ulpq_clear_pd(ulpq) || ev)
		sk->sk_data_ready(sk);
}