1// SPDX-License-Identifier: GPL-2.0-or-later
2/* SCTP kernel implementation
3 * (C) Copyright Red Hat Inc. 2017
4 *
5 * This file is part of the SCTP kernel implementation
6 *
 7 * These functions implement SCTP stream message interleaving, mainly
 8 * the processing of I-DATA and I-FORWARD-TSN chunks.
9 *
10 * Please send any bug reports or fixes you make to the
 11 * email address(es):
12 * lksctp developers <linux-sctp@vger.kernel.org>
13 *
14 * Written or modified by:
15 * Xin Long <lucien.xin@gmail.com>
16 */
17
18#include <net/busy_poll.h>
19#include <net/sctp/sctp.h>
20#include <net/sctp/sm.h>
21#include <net/sctp/ulpevent.h>
22#include <linux/sctp.h>
23
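/* Build an I-DATA chunk with an empty payload: the idata header is filled
 * with the stream number, the caller's sndrcvinfo is copied into the chunk
 * and the UNORDERED flag is propagated from sinfo_flags.
 */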
24static struct sctp_chunk *sctp_make_idatafrag_empty(
25 const struct sctp_association *asoc,
26 const struct sctp_sndrcvinfo *sinfo,
27 int len, __u8 flags, gfp_t gfp)
28{
29 struct sctp_chunk *retval;
30 struct sctp_idatahdr dp;
31
32 memset(&dp, 0, sizeof(dp));
33 dp.stream = htons(sinfo->sinfo_stream);
34
35 if (sinfo->sinfo_flags & SCTP_UNORDERED)
36 flags |= SCTP_DATA_UNORDERED;
37
38 retval = sctp_make_idata(asoc, flags, sizeof(dp) + len, gfp);
39 if (!retval)
40 return NULL;
41
42 retval->subh.idata_hdr = sctp_addto_chunk(retval, sizeof(dp), &dp);
43 memcpy(&retval->sinfo, sinfo, sizeof(struct sctp_sndrcvinfo));
44
45 return retval;
46}
47
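/* Assign a Message Identifier (MID) to every fragment of the chunk's
 * message: the first fragment carries the PPID, the remaining fragments
 * carry a Fragment Sequence Number (FSN).  The stream's MID counter
 * (ordered or unordered) is only advanced once the last fragment has been
 * labelled.
 */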
48static void sctp_chunk_assign_mid(struct sctp_chunk *chunk)
49{
50 struct sctp_stream *stream;
51 struct sctp_chunk *lchunk;
52 __u32 cfsn = 0;
53 __u16 sid;
54
55 if (chunk->has_mid)
56 return;
57
58 sid = sctp_chunk_stream_no(chunk);
59 stream = &chunk->asoc->stream;
60
61 list_for_each_entry(lchunk, &chunk->msg->chunks, frag_list) {
62 struct sctp_idatahdr *hdr;
63 __u32 mid;
64
65 lchunk->has_mid = 1;
66
67 hdr = lchunk->subh.idata_hdr;
68
69 if (lchunk->chunk_hdr->flags & SCTP_DATA_FIRST_FRAG)
70 hdr->ppid = lchunk->sinfo.sinfo_ppid;
71 else
72 hdr->fsn = htonl(cfsn++);
73
74 if (lchunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
75 mid = lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG ?
76 sctp_mid_uo_next(stream, out, sid) :
77 sctp_mid_uo_peek(stream, out, sid);
78 } else {
79 mid = lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG ?
80 sctp_mid_next(stream, out, sid) :
81 sctp_mid_peek(stream, out, sid);
82 }
83 hdr->mid = htonl(mid);
84 }
85}
86
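/* Validate an incoming DATA chunk: anything that is not a DATA chunk is
 * rejected, unordered data is always accepted, and ordered data is only
 * accepted if its SSN has not already been passed by the stream's next
 * expected SSN.
 */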
87static bool sctp_validate_data(struct sctp_chunk *chunk)
88{
89 struct sctp_stream *stream;
90 __u16 sid, ssn;
91
92 if (chunk->chunk_hdr->type != SCTP_CID_DATA)
93 return false;
94
95 if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
96 return true;
97
98 stream = &chunk->asoc->stream;
99 sid = sctp_chunk_stream_no(chunk);
100 ssn = ntohs(chunk->subh.data_hdr->ssn);
101
102 return !SSN_lt(ssn, sctp_ssn_peek(stream, in, sid));
103}
104
105static bool sctp_validate_idata(struct sctp_chunk *chunk)
106{
107 struct sctp_stream *stream;
108 __u32 mid;
109 __u16 sid;
110
111 if (chunk->chunk_hdr->type != SCTP_CID_I_DATA)
112 return false;
113
114 if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
115 return true;
116
117 stream = &chunk->asoc->stream;
118 sid = sctp_chunk_stream_no(chunk);
119 mid = ntohl(chunk->subh.idata_hdr->mid);
120
121 return !MID_lt(mid, sctp_mid_peek(stream, in, sid));
122}
123
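/* Insert @event into the reassembly queue, keeping it sorted by stream id,
 * then MID, then fragment order (a first fragment sorts ahead of the FSNs
 * that follow it).  The common case of appending at the tail is handled
 * without walking the whole queue.
 */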
124static void sctp_intl_store_reasm(struct sctp_ulpq *ulpq,
125 struct sctp_ulpevent *event)
126{
127 struct sctp_ulpevent *cevent;
128 struct sk_buff *pos, *loc;
129
130 pos = skb_peek_tail(&ulpq->reasm);
131 if (!pos) {
132 __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
133 return;
134 }
135
136 cevent = sctp_skb2event(pos);
137
138 if (event->stream == cevent->stream &&
139 event->mid == cevent->mid &&
140 (cevent->msg_flags & SCTP_DATA_FIRST_FRAG ||
141 (!(event->msg_flags & SCTP_DATA_FIRST_FRAG) &&
142 event->fsn > cevent->fsn))) {
143 __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
144 return;
145 }
146
147 if ((event->stream == cevent->stream &&
148 MID_lt(cevent->mid, event->mid)) ||
149 event->stream > cevent->stream) {
150 __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
151 return;
152 }
153
154 loc = NULL;
155 skb_queue_walk(&ulpq->reasm, pos) {
156 cevent = sctp_skb2event(pos);
157
158 if (event->stream < cevent->stream ||
159 (event->stream == cevent->stream &&
160 MID_lt(event->mid, cevent->mid))) {
161 loc = pos;
162 break;
163 }
164 if (event->stream == cevent->stream &&
165 event->mid == cevent->mid &&
166 !(cevent->msg_flags & SCTP_DATA_FIRST_FRAG) &&
167 (event->msg_flags & SCTP_DATA_FIRST_FRAG ||
168 event->fsn < cevent->fsn)) {
169 loc = pos;
170 break;
171 }
172 }
173
174 if (!loc)
175 __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
176 else
177 __skb_queue_before(&ulpq->reasm, loc, sctp_event2skb(event));
178}
179
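/* Continue a partial delivery already in progress on this stream: collect
 * the next run of in-sequence fragments of the message identified by
 * sin->mid, starting at sin->fsn, and reassemble them into an event even
 * if the final fragment has not arrived yet.
 */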
180static struct sctp_ulpevent *sctp_intl_retrieve_partial(
181 struct sctp_ulpq *ulpq,
182 struct sctp_ulpevent *event)
183{
184 struct sk_buff *first_frag = NULL;
185 struct sk_buff *last_frag = NULL;
186 struct sctp_ulpevent *retval;
187 struct sctp_stream_in *sin;
188 struct sk_buff *pos;
189 __u32 next_fsn = 0;
190 int is_last = 0;
191
192 sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
193
194 skb_queue_walk(&ulpq->reasm, pos) {
195 struct sctp_ulpevent *cevent = sctp_skb2event(pos);
196
197 if (cevent->stream < event->stream)
198 continue;
199
200 if (cevent->stream > event->stream ||
201 cevent->mid != sin->mid)
202 break;
203
204 switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
205 case SCTP_DATA_FIRST_FRAG:
206 goto out;
207 case SCTP_DATA_MIDDLE_FRAG:
208 if (!first_frag) {
209 if (cevent->fsn == sin->fsn) {
210 first_frag = pos;
211 last_frag = pos;
212 next_fsn = cevent->fsn + 1;
213 }
214 } else if (cevent->fsn == next_fsn) {
215 last_frag = pos;
216 next_fsn++;
217 } else {
218 goto out;
219 }
220 break;
221 case SCTP_DATA_LAST_FRAG:
222 if (!first_frag) {
223 if (cevent->fsn == sin->fsn) {
224 first_frag = pos;
225 last_frag = pos;
226 next_fsn = 0;
227 is_last = 1;
228 }
229 } else if (cevent->fsn == next_fsn) {
230 last_frag = pos;
231 next_fsn = 0;
232 is_last = 1;
233 }
234 goto out;
235 default:
236 goto out;
237 }
238 }
239
240out:
241 if (!first_frag)
242 return NULL;
243
244 retval = sctp_make_reassembled_event(ulpq->asoc->base.net, &ulpq->reasm,
245 first_frag, last_frag);
246 if (retval) {
247 sin->fsn = next_fsn;
248 if (is_last) {
249 retval->msg_flags |= MSG_EOR;
250 sin->pd_mode = 0;
251 }
252 }
253
254 return retval;
255}
256
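/* Try to reassemble the complete message that @event belongs to.  If the
 * message is still incomplete but the fragments queued for the stream's
 * next expected MID already reach the socket's partial delivery point,
 * deliver that leading run of fragments and put the stream into partial
 * delivery mode.
 */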
257static struct sctp_ulpevent *sctp_intl_retrieve_reassembled(
258 struct sctp_ulpq *ulpq,
259 struct sctp_ulpevent *event)
260{
261 struct sctp_association *asoc = ulpq->asoc;
262 struct sk_buff *pos, *first_frag = NULL;
263 struct sctp_ulpevent *retval = NULL;
264 struct sk_buff *pd_first = NULL;
265 struct sk_buff *pd_last = NULL;
266 struct sctp_stream_in *sin;
267 __u32 next_fsn = 0;
268 __u32 pd_point = 0;
269 __u32 pd_len = 0;
270 __u32 mid = 0;
271
272 sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
273
274 skb_queue_walk(&ulpq->reasm, pos) {
275 struct sctp_ulpevent *cevent = sctp_skb2event(pos);
276
277 if (cevent->stream < event->stream)
278 continue;
279 if (cevent->stream > event->stream)
280 break;
281
282 if (MID_lt(cevent->mid, event->mid))
283 continue;
284 if (MID_lt(event->mid, cevent->mid))
285 break;
286
287 switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
288 case SCTP_DATA_FIRST_FRAG:
289 if (cevent->mid == sin->mid) {
290 pd_first = pos;
291 pd_last = pos;
292 pd_len = pos->len;
293 }
294
295 first_frag = pos;
296 next_fsn = 0;
297 mid = cevent->mid;
298 break;
299
300 case SCTP_DATA_MIDDLE_FRAG:
301 if (first_frag && cevent->mid == mid &&
302 cevent->fsn == next_fsn) {
303 next_fsn++;
304 if (pd_first) {
305 pd_last = pos;
306 pd_len += pos->len;
307 }
308 } else {
309 first_frag = NULL;
310 }
311 break;
312
313 case SCTP_DATA_LAST_FRAG:
314 if (first_frag && cevent->mid == mid &&
315 cevent->fsn == next_fsn)
316 goto found;
317 else
318 first_frag = NULL;
319 break;
320 }
321 }
322
323 if (!pd_first)
324 goto out;
325
326 pd_point = sctp_sk(asoc->base.sk)->pd_point;
327 if (pd_point && pd_point <= pd_len) {
328 retval = sctp_make_reassembled_event(asoc->base.net,
329 &ulpq->reasm,
330 pd_first, pd_last);
331 if (retval) {
332 sin->fsn = next_fsn;
333 sin->pd_mode = 1;
334 }
335 }
336 goto out;
337
338found:
339 retval = sctp_make_reassembled_event(asoc->base.net, &ulpq->reasm,
340 first_frag, pos);
341 if (retval)
342 retval->msg_flags |= MSG_EOR;
343
344out:
345 return retval;
346}
347
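/* Reassembly entry point for ordered I-DATA: unfragmented messages are
 * complete as-is; otherwise the event is queued and we first try to extend
 * an in-progress partial delivery, then to reassemble the whole message.
 */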
348static struct sctp_ulpevent *sctp_intl_reasm(struct sctp_ulpq *ulpq,
349 struct sctp_ulpevent *event)
350{
351 struct sctp_ulpevent *retval = NULL;
352 struct sctp_stream_in *sin;
353
354 if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
355 event->msg_flags |= MSG_EOR;
356 return event;
357 }
358
359 sctp_intl_store_reasm(ulpq, event);
360
361 sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
362 if (sin->pd_mode && event->mid == sin->mid &&
363 event->fsn == sin->fsn)
364 retval = sctp_intl_retrieve_partial(ulpq, event);
365
366 if (!retval)
367 retval = sctp_intl_retrieve_reassembled(ulpq, event);
368
369 return retval;
370}
371
372static void sctp_intl_store_ordered(struct sctp_ulpq *ulpq,
373 struct sctp_ulpevent *event)
374{
375 struct sctp_ulpevent *cevent;
376 struct sk_buff *pos, *loc;
377
378 pos = skb_peek_tail(&ulpq->lobby);
379 if (!pos) {
380 __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
381 return;
382 }
383
384 cevent = (struct sctp_ulpevent *)pos->cb;
385 if (event->stream == cevent->stream &&
386 MID_lt(cevent->mid, event->mid)) {
387 __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
388 return;
389 }
390
391 if (event->stream > cevent->stream) {
392 __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
393 return;
394 }
395
396 loc = NULL;
397 skb_queue_walk(&ulpq->lobby, pos) {
398 cevent = (struct sctp_ulpevent *)pos->cb;
399
400 if (cevent->stream > event->stream) {
401 loc = pos;
402 break;
403 }
404 if (cevent->stream == event->stream &&
405 MID_lt(event->mid, cevent->mid)) {
406 loc = pos;
407 break;
408 }
409 }
410
411 if (!loc)
412 __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
413 else
414 __skb_queue_before(&ulpq->lobby, loc, sctp_event2skb(event));
415}
416
417static void sctp_intl_retrieve_ordered(struct sctp_ulpq *ulpq,
418 struct sctp_ulpevent *event)
419{
420 struct sk_buff_head *event_list;
421 struct sctp_stream *stream;
422 struct sk_buff *pos, *tmp;
423 __u16 sid = event->stream;
424
425 stream = &ulpq->asoc->stream;
426 event_list = (struct sk_buff_head *)sctp_event2skb(event)->prev;
427
428 sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
429 struct sctp_ulpevent *cevent = (struct sctp_ulpevent *)pos->cb;
430
431 if (cevent->stream > sid)
432 break;
433
434 if (cevent->stream < sid)
435 continue;
436
437 if (cevent->mid != sctp_mid_peek(stream, in, sid))
438 break;
439
440 sctp_mid_next(stream, in, sid);
441
442 __skb_unlink(pos, &ulpq->lobby);
443
444 __skb_queue_tail(event_list, pos);
445 }
446}
447
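/* Deliver an ordered message in MID order: if @event carries the next
 * expected MID for its stream it is returned (after pulling any queued
 * successors out of the lobby); otherwise it is parked in the lobby.
 */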
448static struct sctp_ulpevent *sctp_intl_order(struct sctp_ulpq *ulpq,
449 struct sctp_ulpevent *event)
450{
451 struct sctp_stream *stream;
452 __u16 sid;
453
454 stream = &ulpq->asoc->stream;
455 sid = event->stream;
456
457 if (event->mid != sctp_mid_peek(stream, in, sid)) {
458 sctp_intl_store_ordered(ulpq, event);
459 return NULL;
460 }
461
462 sctp_mid_next(stream, in, sid);
463
464 sctp_intl_retrieve_ordered(ulpq, event);
465
466 return event;
467}
468
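/* Move a list of events onto the socket receive queue and wake the reader;
 * the events are freed instead if the socket can no longer receive them or
 * the application has not subscribed to this event type.
 */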
469static int sctp_enqueue_event(struct sctp_ulpq *ulpq,
470 struct sk_buff_head *skb_list)
471{
472 struct sock *sk = ulpq->asoc->base.sk;
473 struct sctp_sock *sp = sctp_sk(sk);
474 struct sctp_ulpevent *event;
475 struct sk_buff *skb;
476
477 skb = __skb_peek(skb_list);
478 event = sctp_skb2event(skb);
479
480 if (sk->sk_shutdown & RCV_SHUTDOWN &&
481 (sk->sk_shutdown & SEND_SHUTDOWN ||
482 !sctp_ulpevent_is_notification(event)))
483 goto out_free;
484
485 if (!sctp_ulpevent_is_notification(event)) {
486 sk_mark_napi_id(sk, skb);
487 sk_incoming_cpu_update(sk);
488 }
489
490 if (!sctp_ulpevent_is_enabled(event, ulpq->asoc->subscribe))
491 goto out_free;
492
493 if (skb_list)
494 skb_queue_splice_tail_init(skb_list,
495 &sk->sk_receive_queue);
496 else
497 __skb_queue_tail(&sk->sk_receive_queue, skb);
498
499 if (!sp->data_ready_signalled) {
500 sp->data_ready_signalled = 1;
501 sk->sk_data_ready(sk);
502 }
503
504 return 1;
505
506out_free:
507 if (skb_list)
508 sctp_queue_purge_ulpevents(skb_list);
509 else
510 sctp_ulpevent_free(event);
511
512 return 0;
513}
514
515static void sctp_intl_store_reasm_uo(struct sctp_ulpq *ulpq,
516 struct sctp_ulpevent *event)
517{
518 struct sctp_ulpevent *cevent;
519 struct sk_buff *pos;
520
521 pos = skb_peek_tail(&ulpq->reasm_uo);
522 if (!pos) {
523 __skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
524 return;
525 }
526
527 cevent = sctp_skb2event(pos);
528
529 if (event->stream == cevent->stream &&
530 event->mid == cevent->mid &&
531 (cevent->msg_flags & SCTP_DATA_FIRST_FRAG ||
532 (!(event->msg_flags & SCTP_DATA_FIRST_FRAG) &&
533 event->fsn > cevent->fsn))) {
534 __skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
535 return;
536 }
537
538 if ((event->stream == cevent->stream &&
539 MID_lt(cevent->mid, event->mid)) ||
540 event->stream > cevent->stream) {
541 __skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
542 return;
543 }
544
545 skb_queue_walk(&ulpq->reasm_uo, pos) {
546 cevent = sctp_skb2event(pos);
547
548 if (event->stream < cevent->stream ||
549 (event->stream == cevent->stream &&
550 MID_lt(event->mid, cevent->mid)))
551 break;
552
553 if (event->stream == cevent->stream &&
554 event->mid == cevent->mid &&
555 !(cevent->msg_flags & SCTP_DATA_FIRST_FRAG) &&
556 (event->msg_flags & SCTP_DATA_FIRST_FRAG ||
557 event->fsn < cevent->fsn))
558 break;
559 }
560
561 __skb_queue_before(&ulpq->reasm_uo, pos, sctp_event2skb(event));
562}
563
564static struct sctp_ulpevent *sctp_intl_retrieve_partial_uo(
565 struct sctp_ulpq *ulpq,
566 struct sctp_ulpevent *event)
567{
568 struct sk_buff *first_frag = NULL;
569 struct sk_buff *last_frag = NULL;
570 struct sctp_ulpevent *retval;
571 struct sctp_stream_in *sin;
572 struct sk_buff *pos;
573 __u32 next_fsn = 0;
574 int is_last = 0;
575
576 sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
577
578 skb_queue_walk(&ulpq->reasm_uo, pos) {
579 struct sctp_ulpevent *cevent = sctp_skb2event(pos);
580
581 if (cevent->stream < event->stream)
582 continue;
583 if (cevent->stream > event->stream)
584 break;
585
586 if (MID_lt(cevent->mid, sin->mid_uo))
587 continue;
588 if (MID_lt(sin->mid_uo, cevent->mid))
589 break;
590
591 switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
592 case SCTP_DATA_FIRST_FRAG:
593 goto out;
594 case SCTP_DATA_MIDDLE_FRAG:
595 if (!first_frag) {
596 if (cevent->fsn == sin->fsn_uo) {
597 first_frag = pos;
598 last_frag = pos;
599 next_fsn = cevent->fsn + 1;
600 }
601 } else if (cevent->fsn == next_fsn) {
602 last_frag = pos;
603 next_fsn++;
604 } else {
605 goto out;
606 }
607 break;
608 case SCTP_DATA_LAST_FRAG:
609 if (!first_frag) {
610 if (cevent->fsn == sin->fsn_uo) {
611 first_frag = pos;
612 last_frag = pos;
613 next_fsn = 0;
614 is_last = 1;
615 }
616 } else if (cevent->fsn == next_fsn) {
617 last_frag = pos;
618 next_fsn = 0;
619 is_last = 1;
620 }
621 goto out;
622 default:
623 goto out;
624 }
625 }
626
627out:
628 if (!first_frag)
629 return NULL;
630
631 retval = sctp_make_reassembled_event(ulpq->asoc->base.net,
632 &ulpq->reasm_uo, first_frag,
633 last_frag);
634 if (retval) {
635 sin->fsn_uo = next_fsn;
636 if (is_last) {
637 retval->msg_flags |= MSG_EOR;
638 sin->pd_mode_uo = 0;
639 }
640 }
641
642 return retval;
643}
644
645static struct sctp_ulpevent *sctp_intl_retrieve_reassembled_uo(
646 struct sctp_ulpq *ulpq,
647 struct sctp_ulpevent *event)
648{
649 struct sctp_association *asoc = ulpq->asoc;
650 struct sk_buff *pos, *first_frag = NULL;
651 struct sctp_ulpevent *retval = NULL;
652 struct sk_buff *pd_first = NULL;
653 struct sk_buff *pd_last = NULL;
654 struct sctp_stream_in *sin;
655 __u32 next_fsn = 0;
656 __u32 pd_point = 0;
657 __u32 pd_len = 0;
658 __u32 mid = 0;
659
660 sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
661
662 skb_queue_walk(&ulpq->reasm_uo, pos) {
663 struct sctp_ulpevent *cevent = sctp_skb2event(pos);
664
665 if (cevent->stream < event->stream)
666 continue;
667 if (cevent->stream > event->stream)
668 break;
669
670 if (MID_lt(cevent->mid, event->mid))
671 continue;
672 if (MID_lt(event->mid, cevent->mid))
673 break;
674
675 switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
676 case SCTP_DATA_FIRST_FRAG:
677 if (!sin->pd_mode_uo) {
678 sin->mid_uo = cevent->mid;
679 pd_first = pos;
680 pd_last = pos;
681 pd_len = pos->len;
682 }
683
684 first_frag = pos;
685 next_fsn = 0;
686 mid = cevent->mid;
687 break;
688
689 case SCTP_DATA_MIDDLE_FRAG:
690 if (first_frag && cevent->mid == mid &&
691 cevent->fsn == next_fsn) {
692 next_fsn++;
693 if (pd_first) {
694 pd_last = pos;
695 pd_len += pos->len;
696 }
697 } else {
698 first_frag = NULL;
699 }
700 break;
701
702 case SCTP_DATA_LAST_FRAG:
703 if (first_frag && cevent->mid == mid &&
704 cevent->fsn == next_fsn)
705 goto found;
706 else
707 first_frag = NULL;
708 break;
709 }
710 }
711
712 if (!pd_first)
713 goto out;
714
715 pd_point = sctp_sk(asoc->base.sk)->pd_point;
716 if (pd_point && pd_point <= pd_len) {
717 retval = sctp_make_reassembled_event(asoc->base.net,
718 &ulpq->reasm_uo,
719 pd_first, pd_last);
720 if (retval) {
721 sin->fsn_uo = next_fsn;
722 sin->pd_mode_uo = 1;
723 }
724 }
725 goto out;
726
727found:
728 retval = sctp_make_reassembled_event(asoc->base.net, &ulpq->reasm_uo,
729 first_frag, pos);
730 if (retval)
731 retval->msg_flags |= MSG_EOR;
732
733out:
734 return retval;
735}
736
737static struct sctp_ulpevent *sctp_intl_reasm_uo(struct sctp_ulpq *ulpq,
738 struct sctp_ulpevent *event)
739{
740 struct sctp_ulpevent *retval = NULL;
741 struct sctp_stream_in *sin;
742
743 if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
744 event->msg_flags |= MSG_EOR;
745 return event;
746 }
747
748 sctp_intl_store_reasm_uo(ulpq, event);
749
750 sin = sctp_stream_in(&ulpq->asoc->stream, event->stream);
751 if (sin->pd_mode_uo && event->mid == sin->mid_uo &&
752 event->fsn == sin->fsn_uo)
753 retval = sctp_intl_retrieve_partial_uo(ulpq, event);
754
755 if (!retval)
756 retval = sctp_intl_retrieve_reassembled_uo(ulpq, event);
757
758 return retval;
759}
760
761static struct sctp_ulpevent *sctp_intl_retrieve_first_uo(struct sctp_ulpq *ulpq)
762{
763 struct sctp_stream_in *csin, *sin = NULL;
764 struct sk_buff *first_frag = NULL;
765 struct sk_buff *last_frag = NULL;
766 struct sctp_ulpevent *retval;
767 struct sk_buff *pos;
768 __u32 next_fsn = 0;
769 __u16 sid = 0;
770
771 skb_queue_walk(&ulpq->reasm_uo, pos) {
772 struct sctp_ulpevent *cevent = sctp_skb2event(pos);
773
774 csin = sctp_stream_in(&ulpq->asoc->stream, cevent->stream);
775 if (csin->pd_mode_uo)
776 continue;
777
778 switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
779 case SCTP_DATA_FIRST_FRAG:
780 if (first_frag)
781 goto out;
782 first_frag = pos;
783 last_frag = pos;
784 next_fsn = 0;
785 sin = csin;
786 sid = cevent->stream;
787 sin->mid_uo = cevent->mid;
788 break;
789 case SCTP_DATA_MIDDLE_FRAG:
790 if (!first_frag)
791 break;
792 if (cevent->stream == sid &&
793 cevent->mid == sin->mid_uo &&
794 cevent->fsn == next_fsn) {
795 next_fsn++;
796 last_frag = pos;
797 } else {
798 goto out;
799 }
800 break;
801 case SCTP_DATA_LAST_FRAG:
802 if (first_frag)
803 goto out;
804 break;
805 default:
806 break;
807 }
808 }
809
810 if (!first_frag)
811 return NULL;
812
813out:
814 retval = sctp_make_reassembled_event(ulpq->asoc->base.net,
815 &ulpq->reasm_uo, first_frag,
816 last_frag);
817 if (retval) {
818 sin->fsn_uo = next_fsn;
819 sin->pd_mode_uo = 1;
820 }
821
822 return retval;
823}
824
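/* Receive path for an I-DATA chunk: build a ulpevent from it, record the
 * MID and PPID (first fragment) or FSN (other fragments), run it through
 * reassembly and, once complete, through MID ordering for ordered data,
 * then enqueue whatever became deliverable.  Returns 1 if a complete
 * message reached the socket, 0 otherwise, or -ENOMEM on allocation
 * failure.
 */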
825static int sctp_ulpevent_idata(struct sctp_ulpq *ulpq,
826 struct sctp_chunk *chunk, gfp_t gfp)
827{
828 struct sctp_ulpevent *event;
829 struct sk_buff_head temp;
830 int event_eor = 0;
831
832 event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
833 if (!event)
834 return -ENOMEM;
835
836 event->mid = ntohl(chunk->subh.idata_hdr->mid);
837 if (event->msg_flags & SCTP_DATA_FIRST_FRAG)
838 event->ppid = chunk->subh.idata_hdr->ppid;
839 else
840 event->fsn = ntohl(chunk->subh.idata_hdr->fsn);
841
842 if (!(event->msg_flags & SCTP_DATA_UNORDERED)) {
843 event = sctp_intl_reasm(ulpq, event);
844 if (event) {
845 skb_queue_head_init(&temp);
846 __skb_queue_tail(&temp, sctp_event2skb(event));
847
848 if (event->msg_flags & MSG_EOR)
849 event = sctp_intl_order(ulpq, event);
850 }
851 } else {
852 event = sctp_intl_reasm_uo(ulpq, event);
853 if (event) {
854 skb_queue_head_init(&temp);
855 __skb_queue_tail(&temp, sctp_event2skb(event));
856 }
857 }
858
859 if (event) {
860 event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
861 sctp_enqueue_event(ulpq, &temp);
862 }
863
864 return event_eor;
865}
866
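/* Forced partial delivery: find the first stream that is not already in
 * partial delivery mode but has the leading fragments of its next expected
 * message queued, reassemble those fragments and put the stream into
 * partial delivery mode.
 */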
867static struct sctp_ulpevent *sctp_intl_retrieve_first(struct sctp_ulpq *ulpq)
868{
869 struct sctp_stream_in *csin, *sin = NULL;
870 struct sk_buff *first_frag = NULL;
871 struct sk_buff *last_frag = NULL;
872 struct sctp_ulpevent *retval;
873 struct sk_buff *pos;
874 __u32 next_fsn = 0;
875 __u16 sid = 0;
876
877 skb_queue_walk(&ulpq->reasm, pos) {
878 struct sctp_ulpevent *cevent = sctp_skb2event(pos);
879
880 csin = sctp_stream_in(&ulpq->asoc->stream, cevent->stream);
881 if (csin->pd_mode)
882 continue;
883
884 switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
885 case SCTP_DATA_FIRST_FRAG:
886 if (first_frag)
887 goto out;
888 if (cevent->mid == csin->mid) {
889 first_frag = pos;
890 last_frag = pos;
891 next_fsn = 0;
892 sin = csin;
893 sid = cevent->stream;
894 }
895 break;
896 case SCTP_DATA_MIDDLE_FRAG:
897 if (!first_frag)
898 break;
899 if (cevent->stream == sid &&
900 cevent->mid == sin->mid &&
901 cevent->fsn == next_fsn) {
902 next_fsn++;
903 last_frag = pos;
904 } else {
905 goto out;
906 }
907 break;
908 case SCTP_DATA_LAST_FRAG:
909 if (first_frag)
910 goto out;
911 break;
912 default:
913 break;
914 }
915 }
916
917 if (!first_frag)
918 return NULL;
919
920out:
921 retval = sctp_make_reassembled_event(ulpq->asoc->base.net,
922 &ulpq->reasm, first_frag,
923 last_frag);
924 if (retval) {
925 sin->fsn = next_fsn;
926 sin->pd_mode = 1;
927 }
928
929 return retval;
930}
931
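/* Kick partial delivery on both the ordered and the unordered reassembly
 * queues until no further events can be pulled out.
 */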
932static void sctp_intl_start_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
933{
934 struct sctp_ulpevent *event;
935 struct sk_buff_head temp;
936
937 if (!skb_queue_empty(&ulpq->reasm)) {
938 do {
939 event = sctp_intl_retrieve_first(ulpq);
940 if (event) {
941 skb_queue_head_init(&temp);
942 __skb_queue_tail(&temp, sctp_event2skb(event));
943 sctp_enqueue_event(ulpq, &temp);
944 }
945 } while (event);
946 }
947
948 if (!skb_queue_empty(&ulpq->reasm_uo)) {
949 do {
950 event = sctp_intl_retrieve_first_uo(ulpq);
951 if (event) {
952 skb_queue_head_init(&temp);
953 __skb_queue_tail(&temp, sctp_event2skb(event));
954 sctp_enqueue_event(ulpq, &temp);
955 }
956 } while (event);
957 }
958}
959
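/* Not enough receive buffer space for the new chunk: if the socket queue
 * is already drained, renege events from the lobby and the reassembly
 * queues until enough room is freed, then process the chunk and start
 * partial delivery if it did not complete a message.
 */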
960static void sctp_renege_events(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
961 gfp_t gfp)
962{
963 struct sctp_association *asoc = ulpq->asoc;
964 __u32 freed = 0;
965 __u16 needed;
966
967 needed = ntohs(chunk->chunk_hdr->length) -
968 sizeof(struct sctp_idata_chunk);
969
970 if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
971 freed = sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
972 if (freed < needed)
973 freed += sctp_ulpq_renege_list(ulpq, &ulpq->reasm,
974 needed);
975 if (freed < needed)
976 freed += sctp_ulpq_renege_list(ulpq, &ulpq->reasm_uo,
977 needed);
978 }
979
980 if (freed >= needed && sctp_ulpevent_idata(ulpq, chunk, gfp) <= 0)
981 sctp_intl_start_pd(ulpq, gfp);
982
983 sk_mem_reclaim(asoc->base.sk);
984}
985
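/* Notify the application that partial delivery on @sid was aborted by
 * queueing a SCTP_PARTIAL_DELIVERY_ABORTED event, provided the socket has
 * subscribed to partial delivery events.
 */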
986static void sctp_intl_stream_abort_pd(struct sctp_ulpq *ulpq, __u16 sid,
987 __u32 mid, __u16 flags, gfp_t gfp)
988{
989 struct sock *sk = ulpq->asoc->base.sk;
990 struct sctp_ulpevent *ev = NULL;
991
992 if (!sctp_ulpevent_type_enabled(ulpq->asoc->subscribe,
993 SCTP_PARTIAL_DELIVERY_EVENT))
994 return;
995
996 ev = sctp_ulpevent_make_pdapi(ulpq->asoc, SCTP_PARTIAL_DELIVERY_ABORTED,
997 sid, mid, flags, gfp);
998 if (ev) {
999 struct sctp_sock *sp = sctp_sk(sk);
1000
1001 __skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));
1002
1003 if (!sp->data_ready_signalled) {
1004 sp->data_ready_signalled = 1;
1005 sk->sk_data_ready(sk);
1006 }
1007 }
1008}
1009
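/* After MIDs have been skipped on @sid, move any lobby events that now lie
 * behind the stream's expected MID to the socket; if the next queued event
 * matches the expected MID exactly it is delivered too, along with any
 * in-order events behind it.
 */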
1010static void sctp_intl_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
1011{
1012 struct sctp_stream *stream = &ulpq->asoc->stream;
1013 struct sctp_ulpevent *cevent, *event = NULL;
1014 struct sk_buff_head *lobby = &ulpq->lobby;
1015 struct sk_buff *pos, *tmp;
1016 struct sk_buff_head temp;
1017 __u16 csid;
1018 __u32 cmid;
1019
1020 skb_queue_head_init(&temp);
1021 sctp_skb_for_each(pos, lobby, tmp) {
1022 cevent = (struct sctp_ulpevent *)pos->cb;
1023 csid = cevent->stream;
1024 cmid = cevent->mid;
1025
1026 if (csid > sid)
1027 break;
1028
1029 if (csid < sid)
1030 continue;
1031
1032 if (!MID_lt(cmid, sctp_mid_peek(stream, in, csid)))
1033 break;
1034
1035 __skb_unlink(pos, lobby);
1036 if (!event)
1037 event = sctp_skb2event(pos);
1038
1039 __skb_queue_tail(&temp, pos);
1040 }
1041
1042 if (!event && pos != (struct sk_buff *)lobby) {
1043 cevent = (struct sctp_ulpevent *)pos->cb;
1044 csid = cevent->stream;
1045 cmid = cevent->mid;
1046
1047 if (csid == sid && cmid == sctp_mid_peek(stream, in, csid)) {
1048 sctp_mid_next(stream, in, csid);
1049 __skb_unlink(pos, lobby);
1050 __skb_queue_tail(&temp, pos);
1051 event = sctp_skb2event(pos);
1052 }
1053 }
1054
1055 if (event) {
1056 sctp_intl_retrieve_ordered(ulpq, event);
1057 sctp_enqueue_event(ulpq, &temp);
1058 }
1059}
1060
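/* Abort partial delivery on every incoming stream, skip past the
 * interrupted MIDs and flush all pending events; used when all queued data
 * has to be dropped.
 */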
1061static void sctp_intl_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
1062{
1063 struct sctp_stream *stream = &ulpq->asoc->stream;
1064 __u16 sid;
1065
1066 for (sid = 0; sid < stream->incnt; sid++) {
1067 struct sctp_stream_in *sin = SCTP_SI(stream, sid);
1068 __u32 mid;
1069
1070 if (sin->pd_mode_uo) {
1071 sin->pd_mode_uo = 0;
1072
1073 mid = sin->mid_uo;
1074 sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x1, gfp);
1075 }
1076
1077 if (sin->pd_mode) {
1078 sin->pd_mode = 0;
1079
1080 mid = sin->mid;
1081 sctp_intl_stream_abort_pd(ulpq, sid, mid, 0, gfp);
1082 sctp_mid_skip(stream, in, sid, mid);
1083
1084 sctp_intl_reap_ordered(ulpq, sid);
1085 }
1086 }
1087
1088 /* intl abort pd happens only when all data needs to be cleaned */
1089 sctp_ulpq_flush(ulpq);
1090}
1091
1092static inline int sctp_get_skip_pos(struct sctp_ifwdtsn_skip *skiplist,
1093 int nskips, __be16 stream, __u8 flags)
1094{
1095 int i;
1096
1097 for (i = 0; i < nskips; i++)
1098 if (skiplist[i].stream == stream &&
1099 skiplist[i].flags == flags)
1100 return i;
1101
1102 return i;
1103}
1104
1105#define SCTP_FTSN_U_BIT 0x1
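/* Build an I-FORWARD-TSN chunk: advance adv_peer_ack_point over abandoned
 * chunks whose TSNs are consecutive with the cumulative TSN ack point,
 * collecting at most ten (stream, flags, MID) skip entries along the way.
 */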
1106static void sctp_generate_iftsn(struct sctp_outq *q, __u32 ctsn)
1107{
1108 struct sctp_ifwdtsn_skip ftsn_skip_arr[10];
1109 struct sctp_association *asoc = q->asoc;
1110 struct sctp_chunk *ftsn_chunk = NULL;
1111 struct list_head *lchunk, *temp;
1112 int nskips = 0, skip_pos;
1113 struct sctp_chunk *chunk;
1114 __u32 tsn;
1115
1116 if (!asoc->peer.prsctp_capable)
1117 return;
1118
1119 if (TSN_lt(asoc->adv_peer_ack_point, ctsn))
1120 asoc->adv_peer_ack_point = ctsn;
1121
1122 list_for_each_safe(lchunk, temp, &q->abandoned) {
1123 chunk = list_entry(lchunk, struct sctp_chunk, transmitted_list);
1124 tsn = ntohl(chunk->subh.data_hdr->tsn);
1125
1126 if (TSN_lte(tsn, ctsn)) {
1127 list_del_init(lchunk);
1128 sctp_chunk_free(chunk);
1129 } else if (TSN_lte(tsn, asoc->adv_peer_ack_point + 1)) {
1130 __be16 sid = chunk->subh.idata_hdr->stream;
1131 __be32 mid = chunk->subh.idata_hdr->mid;
1132 __u8 flags = 0;
1133
1134 if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
1135 flags |= SCTP_FTSN_U_BIT;
1136
1137 asoc->adv_peer_ack_point = tsn;
1138 skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0], nskips,
1139 sid, flags);
1140 ftsn_skip_arr[skip_pos].stream = sid;
1141 ftsn_skip_arr[skip_pos].reserved = 0;
1142 ftsn_skip_arr[skip_pos].flags = flags;
1143 ftsn_skip_arr[skip_pos].mid = mid;
1144 if (skip_pos == nskips)
1145 nskips++;
1146 if (nskips == 10)
1147 break;
1148 } else {
1149 break;
1150 }
1151 }
1152
1153 if (asoc->adv_peer_ack_point > ctsn)
1154 ftsn_chunk = sctp_make_ifwdtsn(asoc, asoc->adv_peer_ack_point,
1155 nskips, &ftsn_skip_arr[0]);
1156
1157 if (ftsn_chunk) {
1158 list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
1159 SCTP_INC_STATS(asoc->base.net, SCTP_MIB_OUTCTRLCHUNKS);
1160 }
1161}
1162
1163#define _sctp_walk_ifwdtsn(pos, chunk, end) \
1164 for (pos = chunk->subh.ifwdtsn_hdr->skip; \
1165 (void *)pos < (void *)chunk->subh.ifwdtsn_hdr->skip + (end); pos++)
1166
1167#define sctp_walk_ifwdtsn(pos, ch) \
1168 _sctp_walk_ifwdtsn((pos), (ch), ntohs((ch)->chunk_hdr->length) - \
1169 sizeof(struct sctp_ifwdtsn_chunk))
1170
1171static bool sctp_validate_fwdtsn(struct sctp_chunk *chunk)
1172{
1173 struct sctp_fwdtsn_skip *skip;
1174 __u16 incnt;
1175
1176 if (chunk->chunk_hdr->type != SCTP_CID_FWD_TSN)
1177 return false;
1178
1179 incnt = chunk->asoc->stream.incnt;
1180 sctp_walk_fwdtsn(skip, chunk)
1181 if (ntohs(skip->stream) >= incnt)
1182 return false;
1183
1184 return true;
1185}
1186
1187static bool sctp_validate_iftsn(struct sctp_chunk *chunk)
1188{
1189 struct sctp_ifwdtsn_skip *skip;
1190 __u16 incnt;
1191
1192 if (chunk->chunk_hdr->type != SCTP_CID_I_FWD_TSN)
1193 return false;
1194
1195 incnt = chunk->asoc->stream.incnt;
1196 sctp_walk_ifwdtsn(skip, chunk)
1197 if (ntohs(skip->stream) >= incnt)
1198 return false;
1199
1200 return true;
1201}
1202
1203static void sctp_report_fwdtsn(struct sctp_ulpq *ulpq, __u32 ftsn)
1204{
1205	/* Move the Cumulative TSN Ack ahead. */
1206 sctp_tsnmap_skip(&ulpq->asoc->peer.tsn_map, ftsn);
1207 /* purge the fragmentation queue */
1208 sctp_ulpq_reasm_flushtsn(ulpq, ftsn);
1209 /* Abort any in progress partial delivery. */
1210 sctp_ulpq_abort_pd(ulpq, GFP_ATOMIC);
1211}
1212
1213static void sctp_intl_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 ftsn)
1214{
1215 struct sk_buff *pos, *tmp;
1216
1217 skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
1218 struct sctp_ulpevent *event = sctp_skb2event(pos);
1219 __u32 tsn = event->tsn;
1220
1221 if (TSN_lte(tsn, ftsn)) {
1222 __skb_unlink(pos, &ulpq->reasm);
1223 sctp_ulpevent_free(event);
1224 }
1225 }
1226
1227 skb_queue_walk_safe(&ulpq->reasm_uo, pos, tmp) {
1228 struct sctp_ulpevent *event = sctp_skb2event(pos);
1229 __u32 tsn = event->tsn;
1230
1231 if (TSN_lte(tsn, ftsn)) {
1232 __skb_unlink(pos, &ulpq->reasm_uo);
1233 sctp_ulpevent_free(event);
1234 }
1235 }
1236}
1237
1238static void sctp_report_iftsn(struct sctp_ulpq *ulpq, __u32 ftsn)
1239{
1240	/* Move the Cumulative TSN Ack ahead. */
1241 sctp_tsnmap_skip(&ulpq->asoc->peer.tsn_map, ftsn);
1242 /* purge the fragmentation queue */
1243 sctp_intl_reasm_flushtsn(ulpq, ftsn);
1244 /* abort only when it's for all data */
1245 if (ftsn == sctp_tsnmap_get_max_tsn_seen(&ulpq->asoc->peer.tsn_map))
1246 sctp_intl_abort_pd(ulpq, GFP_ATOMIC);
1247}
1248
1249static void sctp_handle_fwdtsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk)
1250{
1251 struct sctp_fwdtsn_skip *skip;
1252
1253 /* Walk through all the skipped SSNs */
1254 sctp_walk_fwdtsn(skip, chunk)
1255 sctp_ulpq_skip(ulpq, ntohs(skip->stream), ntohs(skip->ssn));
1256}
1257
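/* Apply one I-FORWARD-TSN skip entry: for unordered data, abort any
 * unordered partial delivery that the skipped MID overtakes; for ordered
 * data, abort partial delivery if needed, advance the stream's expected
 * MID past the skipped one and flush the lobby.
 */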
1258static void sctp_intl_skip(struct sctp_ulpq *ulpq, __u16 sid, __u32 mid,
1259 __u8 flags)
1260{
1261 struct sctp_stream_in *sin = sctp_stream_in(&ulpq->asoc->stream, sid);
1262 struct sctp_stream *stream = &ulpq->asoc->stream;
1263
1264 if (flags & SCTP_FTSN_U_BIT) {
1265 if (sin->pd_mode_uo && MID_lt(sin->mid_uo, mid)) {
1266 sin->pd_mode_uo = 0;
1267 sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x1,
1268 GFP_ATOMIC);
1269 }
1270 return;
1271 }
1272
1273 if (MID_lt(mid, sctp_mid_peek(stream, in, sid)))
1274 return;
1275
1276 if (sin->pd_mode) {
1277 sin->pd_mode = 0;
1278 sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x0, GFP_ATOMIC);
1279 }
1280
1281 sctp_mid_skip(stream, in, sid, mid);
1282
1283 sctp_intl_reap_ordered(ulpq, sid);
1284}
1285
1286static void sctp_handle_iftsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk)
1287{
1288 struct sctp_ifwdtsn_skip *skip;
1289
1290 /* Walk through all the skipped MIDs and abort stream pd if possible */
1291 sctp_walk_ifwdtsn(skip, chunk)
1292 sctp_intl_skip(ulpq, ntohs(skip->stream),
1293 ntohl(skip->mid), skip->flags);
1294}
1295
1296static int do_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
1297{
1298 struct sk_buff_head temp;
1299
1300 skb_queue_head_init(&temp);
1301 __skb_queue_tail(&temp, sctp_event2skb(event));
1302 return sctp_ulpq_tail_event(ulpq, &temp);
1303}
1304
1305static struct sctp_stream_interleave sctp_stream_interleave_0 = {
1306 .data_chunk_len = sizeof(struct sctp_data_chunk),
1307 .ftsn_chunk_len = sizeof(struct sctp_fwdtsn_chunk),
1308 /* DATA process functions */
1309 .make_datafrag = sctp_make_datafrag_empty,
1310 .assign_number = sctp_chunk_assign_ssn,
1311 .validate_data = sctp_validate_data,
1312 .ulpevent_data = sctp_ulpq_tail_data,
1313 .enqueue_event = do_ulpq_tail_event,
1314 .renege_events = sctp_ulpq_renege,
1315 .start_pd = sctp_ulpq_partial_delivery,
1316 .abort_pd = sctp_ulpq_abort_pd,
1317 /* FORWARD-TSN process functions */
1318 .generate_ftsn = sctp_generate_fwdtsn,
1319 .validate_ftsn = sctp_validate_fwdtsn,
1320 .report_ftsn = sctp_report_fwdtsn,
1321 .handle_ftsn = sctp_handle_fwdtsn,
1322};
1323
1324static int do_sctp_enqueue_event(struct sctp_ulpq *ulpq,
1325 struct sctp_ulpevent *event)
1326{
1327 struct sk_buff_head temp;
1328
1329 skb_queue_head_init(&temp);
1330 __skb_queue_tail(&temp, sctp_event2skb(event));
1331 return sctp_enqueue_event(ulpq, &temp);
1332}
1333
1334static struct sctp_stream_interleave sctp_stream_interleave_1 = {
1335 .data_chunk_len = sizeof(struct sctp_idata_chunk),
1336 .ftsn_chunk_len = sizeof(struct sctp_ifwdtsn_chunk),
1337 /* I-DATA process functions */
1338 .make_datafrag = sctp_make_idatafrag_empty,
1339 .assign_number = sctp_chunk_assign_mid,
1340 .validate_data = sctp_validate_idata,
1341 .ulpevent_data = sctp_ulpevent_idata,
1342 .enqueue_event = do_sctp_enqueue_event,
1343 .renege_events = sctp_renege_events,
1344 .start_pd = sctp_intl_start_pd,
1345 .abort_pd = sctp_intl_abort_pd,
1346 /* I-FORWARD-TSN process functions */
1347 .generate_ftsn = sctp_generate_iftsn,
1348 .validate_ftsn = sctp_validate_iftsn,
1349 .report_ftsn = sctp_report_iftsn,
1350 .handle_ftsn = sctp_handle_iftsn,
1351};
1352
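/* Select the set of DATA/FORWARD-TSN operations for this association based
 * on whether the peer negotiated I-DATA (interleaving) support.
 */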
1353void sctp_stream_interleave_init(struct sctp_stream *stream)
1354{
1355 struct sctp_association *asoc;
1356
1357 asoc = container_of(stream, struct sctp_association, stream);
1358 stream->si = asoc->peer.intl_capable ? &sctp_stream_interleave_1
1359 : &sctp_stream_interleave_0;
1360}
1/* SCTP kernel implementation
2 * (C) Copyright Red Hat Inc. 2017
3 *
4 * This file is part of the SCTP kernel implementation
5 *
6 * These functions implement sctp stream message interleaving, mostly
7 * including I-DATA and I-FORWARD-TSN chunks process.
8 *
9 * This SCTP implementation is free software;
10 * you can redistribute it and/or modify it under the terms of
11 * the GNU General Public License as published by
12 * the Free Software Foundation; either version 2, or (at your option)
13 * any later version.
14 *
15 * This SCTP implementation is distributed in the hope that it
16 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
17 * ************************
18 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
19 * See the GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with GNU CC; see the file COPYING. If not, see
23 * <http://www.gnu.org/licenses/>.
24 *
25 * Please send any bug reports or fixes you make to the
26 * email addresched(es):
27 * lksctp developers <linux-sctp@vger.kernel.org>
28 *
29 * Written or modified by:
30 * Xin Long <lucien.xin@gmail.com>
31 */
32
33#include <net/busy_poll.h>
34#include <net/sctp/sctp.h>
35#include <net/sctp/sm.h>
36#include <net/sctp/ulpevent.h>
37#include <linux/sctp.h>
38
39static struct sctp_chunk *sctp_make_idatafrag_empty(
40 const struct sctp_association *asoc,
41 const struct sctp_sndrcvinfo *sinfo,
42 int len, __u8 flags, gfp_t gfp)
43{
44 struct sctp_chunk *retval;
45 struct sctp_idatahdr dp;
46
47 memset(&dp, 0, sizeof(dp));
48 dp.stream = htons(sinfo->sinfo_stream);
49
50 if (sinfo->sinfo_flags & SCTP_UNORDERED)
51 flags |= SCTP_DATA_UNORDERED;
52
53 retval = sctp_make_idata(asoc, flags, sizeof(dp) + len, gfp);
54 if (!retval)
55 return NULL;
56
57 retval->subh.idata_hdr = sctp_addto_chunk(retval, sizeof(dp), &dp);
58 memcpy(&retval->sinfo, sinfo, sizeof(struct sctp_sndrcvinfo));
59
60 return retval;
61}
62
63static void sctp_chunk_assign_mid(struct sctp_chunk *chunk)
64{
65 struct sctp_stream *stream;
66 struct sctp_chunk *lchunk;
67 __u32 cfsn = 0;
68 __u16 sid;
69
70 if (chunk->has_mid)
71 return;
72
73 sid = sctp_chunk_stream_no(chunk);
74 stream = &chunk->asoc->stream;
75
76 list_for_each_entry(lchunk, &chunk->msg->chunks, frag_list) {
77 struct sctp_idatahdr *hdr;
78 __u32 mid;
79
80 lchunk->has_mid = 1;
81
82 hdr = lchunk->subh.idata_hdr;
83
84 if (lchunk->chunk_hdr->flags & SCTP_DATA_FIRST_FRAG)
85 hdr->ppid = lchunk->sinfo.sinfo_ppid;
86 else
87 hdr->fsn = htonl(cfsn++);
88
89 if (lchunk->chunk_hdr->flags & SCTP_DATA_UNORDERED) {
90 mid = lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG ?
91 sctp_mid_uo_next(stream, out, sid) :
92 sctp_mid_uo_peek(stream, out, sid);
93 } else {
94 mid = lchunk->chunk_hdr->flags & SCTP_DATA_LAST_FRAG ?
95 sctp_mid_next(stream, out, sid) :
96 sctp_mid_peek(stream, out, sid);
97 }
98 hdr->mid = htonl(mid);
99 }
100}
101
102static bool sctp_validate_data(struct sctp_chunk *chunk)
103{
104 const struct sctp_stream *stream;
105 __u16 sid, ssn;
106
107 if (chunk->chunk_hdr->type != SCTP_CID_DATA)
108 return false;
109
110 if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
111 return true;
112
113 stream = &chunk->asoc->stream;
114 sid = sctp_chunk_stream_no(chunk);
115 ssn = ntohs(chunk->subh.data_hdr->ssn);
116
117 return !SSN_lt(ssn, sctp_ssn_peek(stream, in, sid));
118}
119
120static bool sctp_validate_idata(struct sctp_chunk *chunk)
121{
122 struct sctp_stream *stream;
123 __u32 mid;
124 __u16 sid;
125
126 if (chunk->chunk_hdr->type != SCTP_CID_I_DATA)
127 return false;
128
129 if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
130 return true;
131
132 stream = &chunk->asoc->stream;
133 sid = sctp_chunk_stream_no(chunk);
134 mid = ntohl(chunk->subh.idata_hdr->mid);
135
136 return !MID_lt(mid, sctp_mid_peek(stream, in, sid));
137}
138
139static void sctp_intl_store_reasm(struct sctp_ulpq *ulpq,
140 struct sctp_ulpevent *event)
141{
142 struct sctp_ulpevent *cevent;
143 struct sk_buff *pos;
144
145 pos = skb_peek_tail(&ulpq->reasm);
146 if (!pos) {
147 __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
148 return;
149 }
150
151 cevent = sctp_skb2event(pos);
152
153 if (event->stream == cevent->stream &&
154 event->mid == cevent->mid &&
155 (cevent->msg_flags & SCTP_DATA_FIRST_FRAG ||
156 (!(event->msg_flags & SCTP_DATA_FIRST_FRAG) &&
157 event->fsn > cevent->fsn))) {
158 __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
159 return;
160 }
161
162 if ((event->stream == cevent->stream &&
163 MID_lt(cevent->mid, event->mid)) ||
164 event->stream > cevent->stream) {
165 __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
166 return;
167 }
168
169 skb_queue_walk(&ulpq->reasm, pos) {
170 cevent = sctp_skb2event(pos);
171
172 if (event->stream < cevent->stream ||
173 (event->stream == cevent->stream &&
174 MID_lt(event->mid, cevent->mid)))
175 break;
176
177 if (event->stream == cevent->stream &&
178 event->mid == cevent->mid &&
179 !(cevent->msg_flags & SCTP_DATA_FIRST_FRAG) &&
180 (event->msg_flags & SCTP_DATA_FIRST_FRAG ||
181 event->fsn < cevent->fsn))
182 break;
183 }
184
185 __skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
186}
187
188static struct sctp_ulpevent *sctp_intl_retrieve_partial(
189 struct sctp_ulpq *ulpq,
190 struct sctp_ulpevent *event)
191{
192 struct sk_buff *first_frag = NULL;
193 struct sk_buff *last_frag = NULL;
194 struct sctp_ulpevent *retval;
195 struct sctp_stream_in *sin;
196 struct sk_buff *pos;
197 __u32 next_fsn = 0;
198 int is_last = 0;
199
200 sin = sctp_stream_in(ulpq->asoc, event->stream);
201
202 skb_queue_walk(&ulpq->reasm, pos) {
203 struct sctp_ulpevent *cevent = sctp_skb2event(pos);
204
205 if (cevent->stream < event->stream)
206 continue;
207
208 if (cevent->stream > event->stream ||
209 cevent->mid != sin->mid)
210 break;
211
212 switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
213 case SCTP_DATA_FIRST_FRAG:
214 goto out;
215 case SCTP_DATA_MIDDLE_FRAG:
216 if (!first_frag) {
217 if (cevent->fsn == sin->fsn) {
218 first_frag = pos;
219 last_frag = pos;
220 next_fsn = cevent->fsn + 1;
221 }
222 } else if (cevent->fsn == next_fsn) {
223 last_frag = pos;
224 next_fsn++;
225 } else {
226 goto out;
227 }
228 break;
229 case SCTP_DATA_LAST_FRAG:
230 if (!first_frag) {
231 if (cevent->fsn == sin->fsn) {
232 first_frag = pos;
233 last_frag = pos;
234 next_fsn = 0;
235 is_last = 1;
236 }
237 } else if (cevent->fsn == next_fsn) {
238 last_frag = pos;
239 next_fsn = 0;
240 is_last = 1;
241 }
242 goto out;
243 default:
244 goto out;
245 }
246 }
247
248out:
249 if (!first_frag)
250 return NULL;
251
252 retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
253 &ulpq->reasm, first_frag,
254 last_frag);
255 if (retval) {
256 sin->fsn = next_fsn;
257 if (is_last) {
258 retval->msg_flags |= MSG_EOR;
259 sin->pd_mode = 0;
260 }
261 }
262
263 return retval;
264}
265
266static struct sctp_ulpevent *sctp_intl_retrieve_reassembled(
267 struct sctp_ulpq *ulpq,
268 struct sctp_ulpevent *event)
269{
270 struct sctp_association *asoc = ulpq->asoc;
271 struct sk_buff *pos, *first_frag = NULL;
272 struct sctp_ulpevent *retval = NULL;
273 struct sk_buff *pd_first = NULL;
274 struct sk_buff *pd_last = NULL;
275 struct sctp_stream_in *sin;
276 __u32 next_fsn = 0;
277 __u32 pd_point = 0;
278 __u32 pd_len = 0;
279 __u32 mid = 0;
280
281 sin = sctp_stream_in(ulpq->asoc, event->stream);
282
283 skb_queue_walk(&ulpq->reasm, pos) {
284 struct sctp_ulpevent *cevent = sctp_skb2event(pos);
285
286 if (cevent->stream < event->stream)
287 continue;
288 if (cevent->stream > event->stream)
289 break;
290
291 if (MID_lt(cevent->mid, event->mid))
292 continue;
293 if (MID_lt(event->mid, cevent->mid))
294 break;
295
296 switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
297 case SCTP_DATA_FIRST_FRAG:
298 if (cevent->mid == sin->mid) {
299 pd_first = pos;
300 pd_last = pos;
301 pd_len = pos->len;
302 }
303
304 first_frag = pos;
305 next_fsn = 0;
306 mid = cevent->mid;
307 break;
308
309 case SCTP_DATA_MIDDLE_FRAG:
310 if (first_frag && cevent->mid == mid &&
311 cevent->fsn == next_fsn) {
312 next_fsn++;
313 if (pd_first) {
314 pd_last = pos;
315 pd_len += pos->len;
316 }
317 } else {
318 first_frag = NULL;
319 }
320 break;
321
322 case SCTP_DATA_LAST_FRAG:
323 if (first_frag && cevent->mid == mid &&
324 cevent->fsn == next_fsn)
325 goto found;
326 else
327 first_frag = NULL;
328 break;
329 }
330 }
331
332 if (!pd_first)
333 goto out;
334
335 pd_point = sctp_sk(asoc->base.sk)->pd_point;
336 if (pd_point && pd_point <= pd_len) {
337 retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
338 &ulpq->reasm,
339 pd_first, pd_last);
340 if (retval) {
341 sin->fsn = next_fsn;
342 sin->pd_mode = 1;
343 }
344 }
345 goto out;
346
347found:
348 retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
349 &ulpq->reasm,
350 first_frag, pos);
351 if (retval)
352 retval->msg_flags |= MSG_EOR;
353
354out:
355 return retval;
356}
357
358static struct sctp_ulpevent *sctp_intl_reasm(struct sctp_ulpq *ulpq,
359 struct sctp_ulpevent *event)
360{
361 struct sctp_ulpevent *retval = NULL;
362 struct sctp_stream_in *sin;
363
364 if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
365 event->msg_flags |= MSG_EOR;
366 return event;
367 }
368
369 sctp_intl_store_reasm(ulpq, event);
370
371 sin = sctp_stream_in(ulpq->asoc, event->stream);
372 if (sin->pd_mode && event->mid == sin->mid &&
373 event->fsn == sin->fsn)
374 retval = sctp_intl_retrieve_partial(ulpq, event);
375
376 if (!retval)
377 retval = sctp_intl_retrieve_reassembled(ulpq, event);
378
379 return retval;
380}
381
382static void sctp_intl_store_ordered(struct sctp_ulpq *ulpq,
383 struct sctp_ulpevent *event)
384{
385 struct sctp_ulpevent *cevent;
386 struct sk_buff *pos;
387
388 pos = skb_peek_tail(&ulpq->lobby);
389 if (!pos) {
390 __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
391 return;
392 }
393
394 cevent = (struct sctp_ulpevent *)pos->cb;
395 if (event->stream == cevent->stream &&
396 MID_lt(cevent->mid, event->mid)) {
397 __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
398 return;
399 }
400
401 if (event->stream > cevent->stream) {
402 __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
403 return;
404 }
405
406 skb_queue_walk(&ulpq->lobby, pos) {
407 cevent = (struct sctp_ulpevent *)pos->cb;
408
409 if (cevent->stream > event->stream)
410 break;
411
412 if (cevent->stream == event->stream &&
413 MID_lt(event->mid, cevent->mid))
414 break;
415 }
416
417 __skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
418}
419
420static void sctp_intl_retrieve_ordered(struct sctp_ulpq *ulpq,
421 struct sctp_ulpevent *event)
422{
423 struct sk_buff_head *event_list;
424 struct sctp_stream *stream;
425 struct sk_buff *pos, *tmp;
426 __u16 sid = event->stream;
427
428 stream = &ulpq->asoc->stream;
429 event_list = (struct sk_buff_head *)sctp_event2skb(event)->prev;
430
431 sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
432 struct sctp_ulpevent *cevent = (struct sctp_ulpevent *)pos->cb;
433
434 if (cevent->stream > sid)
435 break;
436
437 if (cevent->stream < sid)
438 continue;
439
440 if (cevent->mid != sctp_mid_peek(stream, in, sid))
441 break;
442
443 sctp_mid_next(stream, in, sid);
444
445 __skb_unlink(pos, &ulpq->lobby);
446
447 __skb_queue_tail(event_list, pos);
448 }
449}
450
451static struct sctp_ulpevent *sctp_intl_order(struct sctp_ulpq *ulpq,
452 struct sctp_ulpevent *event)
453{
454 struct sctp_stream *stream;
455 __u16 sid;
456
457 stream = &ulpq->asoc->stream;
458 sid = event->stream;
459
460 if (event->mid != sctp_mid_peek(stream, in, sid)) {
461 sctp_intl_store_ordered(ulpq, event);
462 return NULL;
463 }
464
465 sctp_mid_next(stream, in, sid);
466
467 sctp_intl_retrieve_ordered(ulpq, event);
468
469 return event;
470}
471
472static int sctp_enqueue_event(struct sctp_ulpq *ulpq,
473 struct sctp_ulpevent *event)
474{
475 struct sk_buff *skb = sctp_event2skb(event);
476 struct sock *sk = ulpq->asoc->base.sk;
477 struct sctp_sock *sp = sctp_sk(sk);
478 struct sk_buff_head *skb_list;
479
480 skb_list = (struct sk_buff_head *)skb->prev;
481
482 if (sk->sk_shutdown & RCV_SHUTDOWN &&
483 (sk->sk_shutdown & SEND_SHUTDOWN ||
484 !sctp_ulpevent_is_notification(event)))
485 goto out_free;
486
487 if (!sctp_ulpevent_is_notification(event)) {
488 sk_mark_napi_id(sk, skb);
489 sk_incoming_cpu_update(sk);
490 }
491
492 if (!sctp_ulpevent_is_enabled(event, &sp->subscribe))
493 goto out_free;
494
495 if (skb_list)
496 skb_queue_splice_tail_init(skb_list,
497 &sk->sk_receive_queue);
498 else
499 __skb_queue_tail(&sk->sk_receive_queue, skb);
500
501 if (!sp->data_ready_signalled) {
502 sp->data_ready_signalled = 1;
503 sk->sk_data_ready(sk);
504 }
505
506 return 1;
507
508out_free:
509 if (skb_list)
510 sctp_queue_purge_ulpevents(skb_list);
511 else
512 sctp_ulpevent_free(event);
513
514 return 0;
515}
516
517static void sctp_intl_store_reasm_uo(struct sctp_ulpq *ulpq,
518 struct sctp_ulpevent *event)
519{
520 struct sctp_ulpevent *cevent;
521 struct sk_buff *pos;
522
523 pos = skb_peek_tail(&ulpq->reasm_uo);
524 if (!pos) {
525 __skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
526 return;
527 }
528
529 cevent = sctp_skb2event(pos);
530
531 if (event->stream == cevent->stream &&
532 event->mid == cevent->mid &&
533 (cevent->msg_flags & SCTP_DATA_FIRST_FRAG ||
534 (!(event->msg_flags & SCTP_DATA_FIRST_FRAG) &&
535 event->fsn > cevent->fsn))) {
536 __skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
537 return;
538 }
539
540 if ((event->stream == cevent->stream &&
541 MID_lt(cevent->mid, event->mid)) ||
542 event->stream > cevent->stream) {
543 __skb_queue_tail(&ulpq->reasm_uo, sctp_event2skb(event));
544 return;
545 }
546
547 skb_queue_walk(&ulpq->reasm_uo, pos) {
548 cevent = sctp_skb2event(pos);
549
550 if (event->stream < cevent->stream ||
551 (event->stream == cevent->stream &&
552 MID_lt(event->mid, cevent->mid)))
553 break;
554
555 if (event->stream == cevent->stream &&
556 event->mid == cevent->mid &&
557 !(cevent->msg_flags & SCTP_DATA_FIRST_FRAG) &&
558 (event->msg_flags & SCTP_DATA_FIRST_FRAG ||
559 event->fsn < cevent->fsn))
560 break;
561 }
562
563 __skb_queue_before(&ulpq->reasm_uo, pos, sctp_event2skb(event));
564}
565
566static struct sctp_ulpevent *sctp_intl_retrieve_partial_uo(
567 struct sctp_ulpq *ulpq,
568 struct sctp_ulpevent *event)
569{
570 struct sk_buff *first_frag = NULL;
571 struct sk_buff *last_frag = NULL;
572 struct sctp_ulpevent *retval;
573 struct sctp_stream_in *sin;
574 struct sk_buff *pos;
575 __u32 next_fsn = 0;
576 int is_last = 0;
577
578 sin = sctp_stream_in(ulpq->asoc, event->stream);
579
580 skb_queue_walk(&ulpq->reasm_uo, pos) {
581 struct sctp_ulpevent *cevent = sctp_skb2event(pos);
582
583 if (cevent->stream < event->stream)
584 continue;
585 if (cevent->stream > event->stream)
586 break;
587
588 if (MID_lt(cevent->mid, sin->mid_uo))
589 continue;
590 if (MID_lt(sin->mid_uo, cevent->mid))
591 break;
592
593 switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
594 case SCTP_DATA_FIRST_FRAG:
595 goto out;
596 case SCTP_DATA_MIDDLE_FRAG:
597 if (!first_frag) {
598 if (cevent->fsn == sin->fsn_uo) {
599 first_frag = pos;
600 last_frag = pos;
601 next_fsn = cevent->fsn + 1;
602 }
603 } else if (cevent->fsn == next_fsn) {
604 last_frag = pos;
605 next_fsn++;
606 } else {
607 goto out;
608 }
609 break;
610 case SCTP_DATA_LAST_FRAG:
611 if (!first_frag) {
612 if (cevent->fsn == sin->fsn_uo) {
613 first_frag = pos;
614 last_frag = pos;
615 next_fsn = 0;
616 is_last = 1;
617 }
618 } else if (cevent->fsn == next_fsn) {
619 last_frag = pos;
620 next_fsn = 0;
621 is_last = 1;
622 }
623 goto out;
624 default:
625 goto out;
626 }
627 }
628
629out:
630 if (!first_frag)
631 return NULL;
632
633 retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
634 &ulpq->reasm_uo, first_frag,
635 last_frag);
636 if (retval) {
637 sin->fsn_uo = next_fsn;
638 if (is_last) {
639 retval->msg_flags |= MSG_EOR;
640 sin->pd_mode_uo = 0;
641 }
642 }
643
644 return retval;
645}
646
647static struct sctp_ulpevent *sctp_intl_retrieve_reassembled_uo(
648 struct sctp_ulpq *ulpq,
649 struct sctp_ulpevent *event)
650{
651 struct sctp_association *asoc = ulpq->asoc;
652 struct sk_buff *pos, *first_frag = NULL;
653 struct sctp_ulpevent *retval = NULL;
654 struct sk_buff *pd_first = NULL;
655 struct sk_buff *pd_last = NULL;
656 struct sctp_stream_in *sin;
657 __u32 next_fsn = 0;
658 __u32 pd_point = 0;
659 __u32 pd_len = 0;
660 __u32 mid = 0;
661
662 sin = sctp_stream_in(ulpq->asoc, event->stream);
663
664 skb_queue_walk(&ulpq->reasm_uo, pos) {
665 struct sctp_ulpevent *cevent = sctp_skb2event(pos);
666
667 if (cevent->stream < event->stream)
668 continue;
669 if (cevent->stream > event->stream)
670 break;
671
672 if (MID_lt(cevent->mid, event->mid))
673 continue;
674 if (MID_lt(event->mid, cevent->mid))
675 break;
676
677 switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
678 case SCTP_DATA_FIRST_FRAG:
679 if (!sin->pd_mode_uo) {
680 sin->mid_uo = cevent->mid;
681 pd_first = pos;
682 pd_last = pos;
683 pd_len = pos->len;
684 }
685
686 first_frag = pos;
687 next_fsn = 0;
688 mid = cevent->mid;
689 break;
690
691 case SCTP_DATA_MIDDLE_FRAG:
692 if (first_frag && cevent->mid == mid &&
693 cevent->fsn == next_fsn) {
694 next_fsn++;
695 if (pd_first) {
696 pd_last = pos;
697 pd_len += pos->len;
698 }
699 } else {
700 first_frag = NULL;
701 }
702 break;
703
704 case SCTP_DATA_LAST_FRAG:
705 if (first_frag && cevent->mid == mid &&
706 cevent->fsn == next_fsn)
707 goto found;
708 else
709 first_frag = NULL;
710 break;
711 }
712 }
713
714 if (!pd_first)
715 goto out;
716
717 pd_point = sctp_sk(asoc->base.sk)->pd_point;
718 if (pd_point && pd_point <= pd_len) {
719 retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
720 &ulpq->reasm_uo,
721 pd_first, pd_last);
722 if (retval) {
723 sin->fsn_uo = next_fsn;
724 sin->pd_mode_uo = 1;
725 }
726 }
727 goto out;
728
729found:
730 retval = sctp_make_reassembled_event(sock_net(asoc->base.sk),
731 &ulpq->reasm_uo,
732 first_frag, pos);
733 if (retval)
734 retval->msg_flags |= MSG_EOR;
735
736out:
737 return retval;
738}
739
740static struct sctp_ulpevent *sctp_intl_reasm_uo(struct sctp_ulpq *ulpq,
741 struct sctp_ulpevent *event)
742{
743 struct sctp_ulpevent *retval = NULL;
744 struct sctp_stream_in *sin;
745
746 if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
747 event->msg_flags |= MSG_EOR;
748 return event;
749 }
750
751 sctp_intl_store_reasm_uo(ulpq, event);
752
753 sin = sctp_stream_in(ulpq->asoc, event->stream);
754 if (sin->pd_mode_uo && event->mid == sin->mid_uo &&
755 event->fsn == sin->fsn_uo)
756 retval = sctp_intl_retrieve_partial_uo(ulpq, event);
757
758 if (!retval)
759 retval = sctp_intl_retrieve_reassembled_uo(ulpq, event);
760
761 return retval;
762}
763
764static struct sctp_ulpevent *sctp_intl_retrieve_first_uo(struct sctp_ulpq *ulpq)
765{
766 struct sctp_stream_in *csin, *sin = NULL;
767 struct sk_buff *first_frag = NULL;
768 struct sk_buff *last_frag = NULL;
769 struct sctp_ulpevent *retval;
770 struct sk_buff *pos;
771 __u32 next_fsn = 0;
772 __u16 sid = 0;
773
774 skb_queue_walk(&ulpq->reasm_uo, pos) {
775 struct sctp_ulpevent *cevent = sctp_skb2event(pos);
776
777 csin = sctp_stream_in(ulpq->asoc, cevent->stream);
778 if (csin->pd_mode_uo)
779 continue;
780
781 switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
782 case SCTP_DATA_FIRST_FRAG:
783 if (first_frag)
784 goto out;
785 first_frag = pos;
786 last_frag = pos;
787 next_fsn = 0;
788 sin = csin;
789 sid = cevent->stream;
790 sin->mid_uo = cevent->mid;
791 break;
792 case SCTP_DATA_MIDDLE_FRAG:
793 if (!first_frag)
794 break;
795 if (cevent->stream == sid &&
796 cevent->mid == sin->mid_uo &&
797 cevent->fsn == next_fsn) {
798 next_fsn++;
799 last_frag = pos;
800 } else {
801 goto out;
802 }
803 break;
804 case SCTP_DATA_LAST_FRAG:
805 if (first_frag)
806 goto out;
807 break;
808 default:
809 break;
810 }
811 }
812
813 if (!first_frag)
814 return NULL;
815
816out:
817 retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
818 &ulpq->reasm_uo, first_frag,
819 last_frag);
820 if (retval) {
821 sin->fsn_uo = next_fsn;
822 sin->pd_mode_uo = 1;
823 }
824
825 return retval;
826}
827
828static int sctp_ulpevent_idata(struct sctp_ulpq *ulpq,
829 struct sctp_chunk *chunk, gfp_t gfp)
830{
831 struct sctp_ulpevent *event;
832 struct sk_buff_head temp;
833 int event_eor = 0;
834
835 event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
836 if (!event)
837 return -ENOMEM;
838
839 event->mid = ntohl(chunk->subh.idata_hdr->mid);
840 if (event->msg_flags & SCTP_DATA_FIRST_FRAG)
841 event->ppid = chunk->subh.idata_hdr->ppid;
842 else
843 event->fsn = ntohl(chunk->subh.idata_hdr->fsn);
844
845 if (!(event->msg_flags & SCTP_DATA_UNORDERED)) {
846 event = sctp_intl_reasm(ulpq, event);
847 if (event && event->msg_flags & MSG_EOR) {
848 skb_queue_head_init(&temp);
849 __skb_queue_tail(&temp, sctp_event2skb(event));
850
851 event = sctp_intl_order(ulpq, event);
852 }
853 } else {
854 event = sctp_intl_reasm_uo(ulpq, event);
855 }
856
857 if (event) {
858 event_eor = (event->msg_flags & MSG_EOR) ? 1 : 0;
859 sctp_enqueue_event(ulpq, event);
860 }
861
862 return event_eor;
863}
864
static struct sctp_ulpevent *sctp_intl_retrieve_first(struct sctp_ulpq *ulpq)
{
	struct sctp_stream_in *csin, *sin = NULL;
	struct sk_buff *first_frag = NULL;
	struct sk_buff *last_frag = NULL;
	struct sctp_ulpevent *retval;
	struct sk_buff *pos;
	__u32 next_fsn = 0;
	__u16 sid = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		struct sctp_ulpevent *cevent = sctp_skb2event(pos);

		csin = sctp_stream_in(ulpq->asoc, cevent->stream);
		if (csin->pd_mode)
			continue;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (first_frag)
				goto out;
			if (cevent->mid == csin->mid) {
				first_frag = pos;
				last_frag = pos;
				next_fsn = 0;
				sin = csin;
				sid = cevent->stream;
			}
			break;
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				break;
			if (cevent->stream == sid &&
			    cevent->mid == sin->mid &&
			    cevent->fsn == next_fsn) {
				next_fsn++;
				last_frag = pos;
			} else {
				goto out;
			}
			break;
		case SCTP_DATA_LAST_FRAG:
			if (first_frag)
				goto out;
			break;
		default:
			break;
		}
	}

	if (!first_frag)
		return NULL;

out:
	retval = sctp_make_reassembled_event(sock_net(ulpq->asoc->base.sk),
					     &ulpq->reasm, first_frag,
					     last_frag);
	if (retval) {
		sin->fsn = next_fsn;
		sin->pd_mode = 1;
	}

	return retval;
}

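/* Start partial delivery: keep pulling partial messages off the ordered
 * and unordered reassembly queues and handing them to the ULP until no
 * more can be retrieved.
 */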
static void sctp_intl_start_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_ulpevent *event;

	if (!skb_queue_empty(&ulpq->reasm)) {
		do {
			event = sctp_intl_retrieve_first(ulpq);
			if (event)
				sctp_enqueue_event(ulpq, event);
		} while (event);
	}

	if (!skb_queue_empty(&ulpq->reasm_uo)) {
		do {
			event = sctp_intl_retrieve_first_uo(ulpq);
			if (event)
				sctp_enqueue_event(ulpq, event);
		} while (event);
	}
}

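/* Renege queued events to make room for an incoming I-DATA chunk: if the
 * socket receive queue is empty, free space from the lobby first, then
 * from the reassembly queues, and fall back to partial delivery if the
 * new chunk still does not complete a message.
 */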
static void sctp_renege_events(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
			       gfp_t gfp)
{
	struct sctp_association *asoc = ulpq->asoc;
	__u32 freed = 0;
	__u16 needed;

	needed = ntohs(chunk->chunk_hdr->length) -
		 sizeof(struct sctp_idata_chunk);

	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
		freed = sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
		if (freed < needed)
			freed += sctp_ulpq_renege_list(ulpq, &ulpq->reasm,
						       needed);
		if (freed < needed)
			freed += sctp_ulpq_renege_list(ulpq, &ulpq->reasm_uo,
						       needed);
	}

	if (freed >= needed && sctp_ulpevent_idata(ulpq, chunk, gfp) <= 0)
		sctp_intl_start_pd(ulpq, gfp);

	sk_mem_reclaim(asoc->base.sk);
}

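/* Notify the user that partial delivery on a stream has been aborted,
 * provided SCTP_PARTIAL_DELIVERY_EVENT notifications are enabled.
 */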
static void sctp_intl_stream_abort_pd(struct sctp_ulpq *ulpq, __u16 sid,
				      __u32 mid, __u16 flags, gfp_t gfp)
{
	struct sock *sk = ulpq->asoc->base.sk;
	struct sctp_ulpevent *ev = NULL;

	if (!sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
					&sctp_sk(sk)->subscribe))
		return;

	ev = sctp_ulpevent_make_pdapi(ulpq->asoc, SCTP_PARTIAL_DELIVERY_ABORTED,
				      sid, mid, flags, gfp);
	if (ev) {
		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));

		if (!sctp_sk(sk)->data_ready_signalled) {
			sctp_sk(sk)->data_ready_signalled = 1;
			sk->sk_data_ready(sk);
		}
	}
}

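/* After the next expected MID of a stream has moved forward, pull the
 * ordered events for that stream that have become deliverable out of
 * the lobby and hand them to the ULP.
 */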
static void sctp_intl_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
{
	struct sctp_stream *stream = &ulpq->asoc->stream;
	struct sctp_ulpevent *cevent, *event = NULL;
	struct sk_buff_head *lobby = &ulpq->lobby;
	struct sk_buff *pos, *tmp;
	struct sk_buff_head temp;
	__u16 csid;
	__u32 cmid;

	skb_queue_head_init(&temp);
	sctp_skb_for_each(pos, lobby, tmp) {
		cevent = (struct sctp_ulpevent *)pos->cb;
		csid = cevent->stream;
		cmid = cevent->mid;

		if (csid > sid)
			break;

		if (csid < sid)
			continue;

		if (!MID_lt(cmid, sctp_mid_peek(stream, in, csid)))
			break;

		__skb_unlink(pos, lobby);
		if (!event)
			event = sctp_skb2event(pos);

		__skb_queue_tail(&temp, pos);
	}

	if (!event && pos != (struct sk_buff *)lobby) {
		cevent = (struct sctp_ulpevent *)pos->cb;
		csid = cevent->stream;
		cmid = cevent->mid;

		if (csid == sid && cmid == sctp_mid_peek(stream, in, csid)) {
			sctp_mid_next(stream, in, csid);
			__skb_unlink(pos, lobby);
			__skb_queue_tail(&temp, pos);
			event = sctp_skb2event(pos);
		}
	}

	if (event) {
		sctp_intl_retrieve_ordered(ulpq, event);
		sctp_enqueue_event(ulpq, event);
	}
}

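/* Abort partial delivery on every incoming stream, for both unordered
 * and ordered mode, then flush whatever is left on the ulp queues.
 */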
static void sctp_intl_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_stream *stream = &ulpq->asoc->stream;
	__u16 sid;

	for (sid = 0; sid < stream->incnt; sid++) {
		struct sctp_stream_in *sin = &stream->in[sid];
		__u32 mid;

		if (sin->pd_mode_uo) {
			sin->pd_mode_uo = 0;

			mid = sin->mid_uo;
			sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x1, gfp);
		}

		if (sin->pd_mode) {
			sin->pd_mode = 0;

			mid = sin->mid;
			sctp_intl_stream_abort_pd(ulpq, sid, mid, 0, gfp);
			sctp_mid_skip(stream, in, sid, mid);

			sctp_intl_reap_ordered(ulpq, sid);
		}
	}

	/* intl abort pd happens only when all data needs to be cleaned */
	sctp_ulpq_flush(ulpq);
}

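/* Return the index of the skip list entry already covering this
 * stream/flags pair, or nskips when a new entry must be appended.
 */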
static inline int sctp_get_skip_pos(struct sctp_ifwdtsn_skip *skiplist,
				    int nskips, __be16 stream, __u8 flags)
{
	int i;

	for (i = 0; i < nskips; i++)
		if (skiplist[i].stream == stream &&
		    skiplist[i].flags == flags)
			return i;

	return i;
}

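/* Build an I-FORWARD-TSN chunk: advance the advanced peer ack point past
 * abandoned chunks that follow the cumulative TSN, recording up to ten
 * per-stream skip entries, and queue the chunk on the control chunk list
 * when the ack point actually moved.
 */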
#define SCTP_FTSN_U_BIT	0x1
static void sctp_generate_iftsn(struct sctp_outq *q, __u32 ctsn)
{
	struct sctp_ifwdtsn_skip ftsn_skip_arr[10];
	struct sctp_association *asoc = q->asoc;
	struct sctp_chunk *ftsn_chunk = NULL;
	struct list_head *lchunk, *temp;
	int nskips = 0, skip_pos;
	struct sctp_chunk *chunk;
	__u32 tsn;

	if (!asoc->peer.prsctp_capable)
		return;

	if (TSN_lt(asoc->adv_peer_ack_point, ctsn))
		asoc->adv_peer_ack_point = ctsn;

	list_for_each_safe(lchunk, temp, &q->abandoned) {
		chunk = list_entry(lchunk, struct sctp_chunk, transmitted_list);
		tsn = ntohl(chunk->subh.data_hdr->tsn);

		if (TSN_lte(tsn, ctsn)) {
			list_del_init(lchunk);
			sctp_chunk_free(chunk);
		} else if (TSN_lte(tsn, asoc->adv_peer_ack_point + 1)) {
			__be16 sid = chunk->subh.idata_hdr->stream;
			__be32 mid = chunk->subh.idata_hdr->mid;
			__u8 flags = 0;

			if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
				flags |= SCTP_FTSN_U_BIT;

			asoc->adv_peer_ack_point = tsn;
			skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0], nskips,
						     sid, flags);
			ftsn_skip_arr[skip_pos].stream = sid;
			ftsn_skip_arr[skip_pos].reserved = 0;
			ftsn_skip_arr[skip_pos].flags = flags;
			ftsn_skip_arr[skip_pos].mid = mid;
			if (skip_pos == nskips)
				nskips++;
			if (nskips == 10)
				break;
		} else {
			break;
		}
	}

	if (asoc->adv_peer_ack_point > ctsn)
		ftsn_chunk = sctp_make_ifwdtsn(asoc, asoc->adv_peer_ack_point,
					       nskips, &ftsn_skip_arr[0]);

	if (ftsn_chunk) {
		list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
		SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_OUTCTRLCHUNKS);
	}
}

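/* Iterate over the skip entries carried in an I-FORWARD-TSN chunk,
 * bounded by the chunk length.
 */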
#define _sctp_walk_ifwdtsn(pos, chunk, end) \
	for (pos = chunk->subh.ifwdtsn_hdr->skip; \
	     (void *)pos < (void *)chunk->subh.ifwdtsn_hdr->skip + (end); pos++)

#define sctp_walk_ifwdtsn(pos, ch) \
	_sctp_walk_ifwdtsn((pos), (ch), ntohs((ch)->chunk_hdr->length) - \
			   sizeof(struct sctp_ifwdtsn_chunk))

static bool sctp_validate_fwdtsn(struct sctp_chunk *chunk)
{
	struct sctp_fwdtsn_skip *skip;
	__u16 incnt;

	if (chunk->chunk_hdr->type != SCTP_CID_FWD_TSN)
		return false;

	incnt = chunk->asoc->stream.incnt;
	sctp_walk_fwdtsn(skip, chunk)
		if (ntohs(skip->stream) >= incnt)
			return false;

	return true;
}

static bool sctp_validate_iftsn(struct sctp_chunk *chunk)
{
	struct sctp_ifwdtsn_skip *skip;
	__u16 incnt;

	if (chunk->chunk_hdr->type != SCTP_CID_I_FWD_TSN)
		return false;

	incnt = chunk->asoc->stream.incnt;
	sctp_walk_ifwdtsn(skip, chunk)
		if (ntohs(skip->stream) >= incnt)
			return false;

	return true;
}

static void sctp_report_fwdtsn(struct sctp_ulpq *ulpq, __u32 ftsn)
{
	/* Move the Cumulative TSN Ack ahead. */
	sctp_tsnmap_skip(&ulpq->asoc->peer.tsn_map, ftsn);
	/* purge the fragmentation queue */
	sctp_ulpq_reasm_flushtsn(ulpq, ftsn);
	/* Abort any in progress partial delivery. */
	sctp_ulpq_abort_pd(ulpq, GFP_ATOMIC);
}

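/* Drop every queued fragment, ordered or unordered, whose TSN is at or
 * below the forwarded TSN.
 */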
static void sctp_intl_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 ftsn)
{
	struct sk_buff *pos, *tmp;

	skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
		struct sctp_ulpevent *event = sctp_skb2event(pos);
		__u32 tsn = event->tsn;

		if (TSN_lte(tsn, ftsn)) {
			__skb_unlink(pos, &ulpq->reasm);
			sctp_ulpevent_free(event);
		}
	}

	skb_queue_walk_safe(&ulpq->reasm_uo, pos, tmp) {
		struct sctp_ulpevent *event = sctp_skb2event(pos);
		__u32 tsn = event->tsn;

		if (TSN_lte(tsn, ftsn)) {
			__skb_unlink(pos, &ulpq->reasm_uo);
			sctp_ulpevent_free(event);
		}
	}
}

static void sctp_report_iftsn(struct sctp_ulpq *ulpq, __u32 ftsn)
{
	/* Move the Cumulative TSN Ack ahead. */
	sctp_tsnmap_skip(&ulpq->asoc->peer.tsn_map, ftsn);
	/* purge the fragmentation queue */
	sctp_intl_reasm_flushtsn(ulpq, ftsn);
	/* abort only when it's for all data */
	if (ftsn == sctp_tsnmap_get_max_tsn_seen(&ulpq->asoc->peer.tsn_map))
		sctp_intl_abort_pd(ulpq, GFP_ATOMIC);
}

static void sctp_handle_fwdtsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk)
{
	struct sctp_fwdtsn_skip *skip;

	/* Walk through all the skipped SSNs */
	sctp_walk_fwdtsn(skip, chunk)
		sctp_ulpq_skip(ulpq, ntohs(skip->stream), ntohs(skip->ssn));
}

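/* Process one skip entry from an I-FORWARD-TSN: abort any partial
 * delivery the skip interrupts and, for ordered data, advance the next
 * expected MID past the skipped one and reap newly deliverable events.
 */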
static void sctp_intl_skip(struct sctp_ulpq *ulpq, __u16 sid, __u32 mid,
			   __u8 flags)
{
	struct sctp_stream_in *sin = sctp_stream_in(ulpq->asoc, sid);
	struct sctp_stream *stream = &ulpq->asoc->stream;

	if (flags & SCTP_FTSN_U_BIT) {
		if (sin->pd_mode_uo && MID_lt(sin->mid_uo, mid)) {
			sin->pd_mode_uo = 0;
			sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x1,
						  GFP_ATOMIC);
		}
		return;
	}

	if (MID_lt(mid, sctp_mid_peek(stream, in, sid)))
		return;

	if (sin->pd_mode) {
		sin->pd_mode = 0;
		sctp_intl_stream_abort_pd(ulpq, sid, mid, 0x0, GFP_ATOMIC);
	}

	sctp_mid_skip(stream, in, sid, mid);

	sctp_intl_reap_ordered(ulpq, sid);
}

static void sctp_handle_iftsn(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk)
{
	struct sctp_ifwdtsn_skip *skip;

	/* Walk through all the skipped MIDs and abort stream pd if possible */
	sctp_walk_ifwdtsn(skip, chunk)
		sctp_intl_skip(ulpq, ntohs(skip->stream),
			       ntohl(skip->mid), skip->flags);
}

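/* Operations used when stream interleaving is not negotiated: plain
 * DATA and FORWARD-TSN processing.
 */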
static struct sctp_stream_interleave sctp_stream_interleave_0 = {
	.data_chunk_len = sizeof(struct sctp_data_chunk),
	.ftsn_chunk_len = sizeof(struct sctp_fwdtsn_chunk),
	/* DATA process functions */
	.make_datafrag = sctp_make_datafrag_empty,
	.assign_number = sctp_chunk_assign_ssn,
	.validate_data = sctp_validate_data,
	.ulpevent_data = sctp_ulpq_tail_data,
	.enqueue_event = sctp_ulpq_tail_event,
	.renege_events = sctp_ulpq_renege,
	.start_pd = sctp_ulpq_partial_delivery,
	.abort_pd = sctp_ulpq_abort_pd,
	/* FORWARD-TSN process functions */
	.generate_ftsn = sctp_generate_fwdtsn,
	.validate_ftsn = sctp_validate_fwdtsn,
	.report_ftsn = sctp_report_fwdtsn,
	.handle_ftsn = sctp_handle_fwdtsn,
};

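/* Operations used when stream interleaving is negotiated: I-DATA and
 * I-FORWARD-TSN processing.
 */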
static struct sctp_stream_interleave sctp_stream_interleave_1 = {
	.data_chunk_len = sizeof(struct sctp_idata_chunk),
	.ftsn_chunk_len = sizeof(struct sctp_ifwdtsn_chunk),
	/* I-DATA process functions */
	.make_datafrag = sctp_make_idatafrag_empty,
	.assign_number = sctp_chunk_assign_mid,
	.validate_data = sctp_validate_idata,
	.ulpevent_data = sctp_ulpevent_idata,
	.enqueue_event = sctp_enqueue_event,
	.renege_events = sctp_renege_events,
	.start_pd = sctp_intl_start_pd,
	.abort_pd = sctp_intl_abort_pd,
	/* I-FORWARD-TSN process functions */
	.generate_ftsn = sctp_generate_iftsn,
	.validate_ftsn = sctp_validate_iftsn,
	.report_ftsn = sctp_report_iftsn,
	.handle_ftsn = sctp_handle_iftsn,
};

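/* Pick the I-DATA or DATA operations for this association, depending on
 * whether interleaving support was negotiated (asoc->intl_enable).
 */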
void sctp_stream_interleave_init(struct sctp_stream *stream)
{
	struct sctp_association *asoc;

	asoc = container_of(stream, struct sctp_association, stream);
	stream->si = asoc->intl_enable ? &sctp_stream_interleave_1
				       : &sctp_stream_interleave_0;
}