// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2007 The University of Aberdeen, Scotland, UK
 * Copyright (c) 2005-7 The University of Waikato, Hamilton, New Zealand.
 *
 * An implementation of the DCCP protocol
 *
 * This code has been developed by the University of Waikato WAND
 * research group. For further information please see https://www.wand.net.nz/
 * or e-mail Ian McDonald - ian.mcdonald@jandi.co.nz
 *
 * This code also uses code from Lulea University, rereleased as GPL by its
 * authors:
 * Copyright (c) 2003 Nils-Erik Mattsson, Joacim Haggmark, Magnus Erixzon
 *
 * Changes to meet Linux coding standards, to make it meet latest ccid3 draft
 * and to make it work as a loadable module in the DCCP stack written by
 * Arnaldo Carvalho de Melo <acme@conectiva.com.br>.
 *
 * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 */

#include <linux/string.h>
#include <linux/slab.h>
#include "packet_history.h"
#include "../../dccp.h"

/*
 * Transmitter History Routines
 */
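/*
 * The transmitter history is kept as a singly-linked list, newest packet
 * first: each tfrc_tx_hist_entry records the sequence number and send
 * timestamp of one packet, and tfrc_tx_hist_add() pushes onto the head of
 * the list.  Entries are carved from the tfrc_tx_hist_slab cache below.
 */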
static struct kmem_cache *tfrc_tx_hist_slab;

int __init tfrc_tx_packet_history_init(void)
{
        tfrc_tx_hist_slab = kmem_cache_create("tfrc_tx_hist",
                                              sizeof(struct tfrc_tx_hist_entry),
                                              0, SLAB_HWCACHE_ALIGN, NULL);
        return tfrc_tx_hist_slab == NULL ? -ENOBUFS : 0;
}

void tfrc_tx_packet_history_exit(void)
{
        if (tfrc_tx_hist_slab != NULL) {
                kmem_cache_destroy(tfrc_tx_hist_slab);
                tfrc_tx_hist_slab = NULL;
        }
}

int tfrc_tx_hist_add(struct tfrc_tx_hist_entry **headp, u64 seqno)
{
        struct tfrc_tx_hist_entry *entry = kmem_cache_alloc(tfrc_tx_hist_slab, gfp_any());

        if (entry == NULL)
                return -ENOBUFS;
        entry->seqno = seqno;
        entry->stamp = ktime_get_real();
        entry->next = *headp;
        *headp = entry;
        return 0;
}

void tfrc_tx_hist_purge(struct tfrc_tx_hist_entry **headp)
{
        struct tfrc_tx_hist_entry *head = *headp;

        while (head != NULL) {
                struct tfrc_tx_hist_entry *next = head->next;

                kmem_cache_free(tfrc_tx_hist_slab, head);
                head = next;
        }

        *headp = NULL;
}
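
/*
 * A minimal usage sketch for the sender side (illustrative only; the real
 * call sites live in the CCID-3 half-connection sender):
 *
 *	struct tfrc_tx_hist_entry *hist = NULL;
 *
 *	// remember each packet as it is sent
 *	if (tfrc_tx_hist_add(&hist, dccp_sk(sk)->dccps_gss))
 *		return -ENOBUFS;
 *	...
 *	// release all history on connection teardown
 *	tfrc_tx_hist_purge(&hist);
 */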

/*
 * Receiver History Routines
 */
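/*
 * The receiver history lives in h->ring[], a circular buffer of
 * TFRC_NDUPACK + 1 = 4 slab-allocated entries (see packet_history.h).
 * Relative position 0, "loss_prev", is the last packet determined to be
 * in sequence; positions 1..3 hold packets received after a perceived
 * gap, pending loss confirmation.  tfrc_rx_hist_index(h, n) turns a
 * relative position into an absolute ring index via
 * (h->loss_start + n) & TFRC_NDUPACK.
 */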
static struct kmem_cache *tfrc_rx_hist_slab;

int __init tfrc_rx_packet_history_init(void)
{
        tfrc_rx_hist_slab = kmem_cache_create("tfrc_rxh_cache",
                                              sizeof(struct tfrc_rx_hist_entry),
                                              0, SLAB_HWCACHE_ALIGN, NULL);
        return tfrc_rx_hist_slab == NULL ? -ENOBUFS : 0;
}

void tfrc_rx_packet_history_exit(void)
{
        if (tfrc_rx_hist_slab != NULL) {
                kmem_cache_destroy(tfrc_rx_hist_slab);
                tfrc_rx_hist_slab = NULL;
        }
}

static inline void tfrc_rx_hist_entry_from_skb(struct tfrc_rx_hist_entry *entry,
                                               const struct sk_buff *skb,
                                               const u64 ndp)
{
        const struct dccp_hdr *dh = dccp_hdr(skb);

        entry->tfrchrx_seqno = DCCP_SKB_CB(skb)->dccpd_seq;
        entry->tfrchrx_ccval = dh->dccph_ccval;
        entry->tfrchrx_type = dh->dccph_type;
        entry->tfrchrx_ndp = ndp;
        entry->tfrchrx_tstamp = ktime_get_real();
}

void tfrc_rx_hist_add_packet(struct tfrc_rx_hist *h,
                             const struct sk_buff *skb,
                             const u64 ndp)
{
        struct tfrc_rx_hist_entry *entry = tfrc_rx_hist_last_rcv(h);

        tfrc_rx_hist_entry_from_skb(entry, skb, ndp);
}

/* has the packet contained in skb been seen before? */
int tfrc_rx_hist_duplicate(struct tfrc_rx_hist *h, struct sk_buff *skb)
{
        const u64 seq = DCCP_SKB_CB(skb)->dccpd_seq;
        int i;

        if (dccp_delta_seqno(tfrc_rx_hist_loss_prev(h)->tfrchrx_seqno, seq) <= 0)
                return 1;

        for (i = 1; i <= h->loss_count; i++)
                if (tfrc_rx_hist_entry(h, i)->tfrchrx_seqno == seq)
                        return 1;

        return 0;
}

static void tfrc_rx_hist_swap(struct tfrc_rx_hist *h, const u8 a, const u8 b)
{
        const u8 idx_a = tfrc_rx_hist_index(h, a),
                 idx_b = tfrc_rx_hist_index(h, b);

        swap(h->ring[idx_a], h->ring[idx_b]);
}

/*
 * Private helper functions for loss detection.
 *
 * In the descriptions, `Si' refers to the sequence number of entry number i,
 * whose NDP count is `Ni' (lower case is used for variables).
 * Note: All __xxx_loss functions expect that a test against duplicates has been
 *       performed already: the seqno of the skb must not be less than the seqno
 *       of loss_prev; and it must not equal that of any valid history entry.
 */
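/*
 * dccp_loss_free(s1, s2, n), defined in dccp.h, decides whether the gap
 * between two sequence numbers is fully explained by non-data packets:
 * it holds iff dccp_delta_seqno(s1, s2) <= n + 1, with n the NDP count
 * carried by the later packet.  Example: s1 = 10, s2 = 14, n = 3 gives a
 * delta of 4 <= 3 + 1, i.e. the three intervening seqnos can all have
 * been non-data packets, so nothing needs to be tracked as lost.
 */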
static void __do_track_loss(struct tfrc_rx_hist *h, struct sk_buff *skb, u64 n1)
{
        u64 s0 = tfrc_rx_hist_loss_prev(h)->tfrchrx_seqno,
            s1 = DCCP_SKB_CB(skb)->dccpd_seq;

        if (!dccp_loss_free(s0, s1, n1)) {      /* gap between S0 and S1 */
                h->loss_count = 1;
                tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 1), skb, n1);
        }
}

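/*
 * Example for the reordering case handled below: loss_prev holds S0 = 10,
 * entry 1 holds S1 = 15 (loss_count == 1), and S2 = 12 arrives late.  If
 * the NDP counts show both 10..12 and 12..15 to be loss-free, the hole is
 * filled and tracking stops (S1 becomes the new loss_prev).  If only
 * 10..12 is loss-free, S2 merely advances loss_prev; if 10..12 still has
 * a gap, S2 is inserted between S0 and S1 and loss_count grows to 2.
 */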
static void __one_after_loss(struct tfrc_rx_hist *h, struct sk_buff *skb, u32 n2)
{
        u64 s0 = tfrc_rx_hist_loss_prev(h)->tfrchrx_seqno,
            s1 = tfrc_rx_hist_entry(h, 1)->tfrchrx_seqno,
            s2 = DCCP_SKB_CB(skb)->dccpd_seq;

        if (likely(dccp_delta_seqno(s1, s2) > 0)) {     /* S1 < S2 */
                h->loss_count = 2;
                tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 2), skb, n2);
                return;
        }

        /* S0 < S2 < S1 */

        if (dccp_loss_free(s0, s2, n2)) {
                u64 n1 = tfrc_rx_hist_entry(h, 1)->tfrchrx_ndp;

                if (dccp_loss_free(s2, s1, n1)) {
                        /* hole is filled: S0, S2, and S1 are consecutive */
                        h->loss_count = 0;
                        h->loss_start = tfrc_rx_hist_index(h, 1);
                } else
                        /* gap between S2 and S1: just update loss_prev */
                        tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_loss_prev(h), skb, n2);

        } else {        /* gap between S0 and S2 */
                /*
                 * Reorder history to insert S2 between S0 and S1
                 */
                tfrc_rx_hist_swap(h, 0, 3);
                h->loss_start = tfrc_rx_hist_index(h, 3);
                tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 1), skb, n2);
                h->loss_count = 2;
        }
}

/* return 1 if a new loss event has been identified */
static int __two_after_loss(struct tfrc_rx_hist *h, struct sk_buff *skb, u32 n3)
{
        u64 s0 = tfrc_rx_hist_loss_prev(h)->tfrchrx_seqno,
            s1 = tfrc_rx_hist_entry(h, 1)->tfrchrx_seqno,
            s2 = tfrc_rx_hist_entry(h, 2)->tfrchrx_seqno,
            s3 = DCCP_SKB_CB(skb)->dccpd_seq;

        if (likely(dccp_delta_seqno(s2, s3) > 0)) {     /* S2 < S3 */
                h->loss_count = 3;
                tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 3), skb, n3);
                return 1;
        }

        /* S3 < S2 */

        if (dccp_delta_seqno(s1, s3) > 0) {     /* S1 < S3 < S2 */
                /*
                 * Reorder history to insert S3 between S1 and S2
                 */
                tfrc_rx_hist_swap(h, 2, 3);
                tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 2), skb, n3);
                h->loss_count = 3;
                return 1;
        }

        /* S0 < S3 < S1 */

        if (dccp_loss_free(s0, s3, n3)) {
                u64 n1 = tfrc_rx_hist_entry(h, 1)->tfrchrx_ndp;

                if (dccp_loss_free(s3, s1, n1)) {
                        /* hole between S0 and S1 filled by S3 */
                        u64 n2 = tfrc_rx_hist_entry(h, 2)->tfrchrx_ndp;

                        if (dccp_loss_free(s1, s2, n2)) {
                                /* entire hole filled by S0, S3, S1, S2 */
                                h->loss_start = tfrc_rx_hist_index(h, 2);
                                h->loss_count = 0;
                        } else {
                                /* gap remains between S1 and S2 */
                                h->loss_start = tfrc_rx_hist_index(h, 1);
                                h->loss_count = 1;
                        }

                } else /* gap exists between S3 and S1, loss_count stays at 2 */
                        tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_loss_prev(h), skb, n3);

                return 0;
        }

        /*
         * The remaining case: S0 < S3 < S1 < S2; gap between S0 and S3
         * Reorder history to insert S3 between S0 and S1.
         */
        tfrc_rx_hist_swap(h, 0, 3);
        h->loss_start = tfrc_rx_hist_index(h, 3);
        tfrc_rx_hist_entry_from_skb(tfrc_rx_hist_entry(h, 1), skb, n3);
        h->loss_count = 3;

        return 1;
}

/* recycle RX history records to continue loss detection if necessary */
static void __three_after_loss(struct tfrc_rx_hist *h)
{
        /*
         * At this stage we know already that there is a gap between S0 and S1
         * (since S0 was the highest sequence number received before detecting
         * the loss). To recycle the loss record, it is thus only necessary to
         * check for other possible gaps between S1/S2 and between S2/S3.
         */
        u64 s1 = tfrc_rx_hist_entry(h, 1)->tfrchrx_seqno,
            s2 = tfrc_rx_hist_entry(h, 2)->tfrchrx_seqno,
            s3 = tfrc_rx_hist_entry(h, 3)->tfrchrx_seqno;
        u64 n2 = tfrc_rx_hist_entry(h, 2)->tfrchrx_ndp,
            n3 = tfrc_rx_hist_entry(h, 3)->tfrchrx_ndp;

        if (dccp_loss_free(s1, s2, n2)) {

                if (dccp_loss_free(s2, s3, n3)) {
                        /* no gap between S2 and S3: entire hole is filled */
                        h->loss_start = tfrc_rx_hist_index(h, 3);
                        h->loss_count = 0;
                } else {
                        /* gap between S2 and S3 */
                        h->loss_start = tfrc_rx_hist_index(h, 2);
                        h->loss_count = 1;
                }

        } else {        /* gap between S1 and S2 */
                h->loss_start = tfrc_rx_hist_index(h, 1);
                h->loss_count = 2;
        }
}

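/*
 * Loss detection state machine: h->loss_count is the number of tentative
 * entries held behind loss_prev.  It starts at 0, grows by one for every
 * packet that arrives beyond a still-unexplained gap, and a loss event is
 * only declared once it reaches TFRC_NDUPACK == 3, mirroring the familiar
 * three-duplicate threshold of TCP.  Reordered packets may instead shrink
 * it back towards 0 by filling the gap, as handled by the helpers above.
 */
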
/**
 * tfrc_rx_handle_loss - Loss detection and further processing
 * @h: The non-empty RX history object
 * @lh: Loss Intervals database to update
 * @skb: Currently received packet
 * @ndp: The NDP count belonging to @skb
 * @calc_first_li: Caller-dependent computation of first loss interval in @lh
 * @sk: Used by @calc_first_li (see tfrc_lh_interval_add)
 *
 * Chooses action according to pending loss, updates LI database when a new
 * loss was detected, and does required post-processing. Returns 1 when caller
 * should send feedback, 0 otherwise.
 * Since it also takes care of reordering during loss detection and updates the
 * records accordingly, the caller should not perform any more RX history
 * operations when loss_count is greater than 0 after calling this function.
 */
int tfrc_rx_handle_loss(struct tfrc_rx_hist *h,
                        struct tfrc_loss_hist *lh,
                        struct sk_buff *skb, const u64 ndp,
                        u32 (*calc_first_li)(struct sock *), struct sock *sk)
{
        int is_new_loss = 0;

        if (h->loss_count == 0) {
                __do_track_loss(h, skb, ndp);
        } else if (h->loss_count == 1) {
                __one_after_loss(h, skb, ndp);
        } else if (h->loss_count != 2) {
                DCCP_BUG("invalid loss_count %d", h->loss_count);
        } else if (__two_after_loss(h, skb, ndp)) {
                /*
                 * Update Loss Interval database and recycle RX records
                 */
                is_new_loss = tfrc_lh_interval_add(lh, h, calc_first_li, sk);
                __three_after_loss(h);
        }
        return is_new_loss;
}

int tfrc_rx_hist_alloc(struct tfrc_rx_hist *h)
{
        int i;

        for (i = 0; i <= TFRC_NDUPACK; i++) {
                h->ring[i] = kmem_cache_alloc(tfrc_rx_hist_slab, GFP_ATOMIC);
                if (h->ring[i] == NULL)
                        goto out_free;
        }

        h->loss_count = h->loss_start = 0;
        return 0;

out_free:
        while (i-- != 0) {
                kmem_cache_free(tfrc_rx_hist_slab, h->ring[i]);
                h->ring[i] = NULL;
        }
        return -ENOBUFS;
}
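
/*
 * Note on the allocation scheme: all TFRC_NDUPACK + 1 ring entries are
 * allocated once per connection and never reallocated; the loss-detection
 * helpers above "move" packets purely by swapping ring pointers and
 * advancing loss_start, so entry contents never need to be copied.
 */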

void tfrc_rx_hist_purge(struct tfrc_rx_hist *h)
{
        int i;

        for (i = 0; i <= TFRC_NDUPACK; ++i)
                if (h->ring[i] != NULL) {
                        kmem_cache_free(tfrc_rx_hist_slab, h->ring[i]);
                        h->ring[i] = NULL;
                }
}

/**
 * tfrc_rx_hist_rtt_last_s - reference entry to compute RTT samples against
 * @h: The non-empty RX history object
 */
static inline struct tfrc_rx_hist_entry *
                        tfrc_rx_hist_rtt_last_s(const struct tfrc_rx_hist *h)
{
        return h->ring[0];
}

/**
 * tfrc_rx_hist_rtt_prev_s - previously suitable (wrt rtt_last_s) RTT-sampling entry
 * @h: The non-empty RX history object
 */
static inline struct tfrc_rx_hist_entry *
                        tfrc_rx_hist_rtt_prev_s(const struct tfrc_rx_hist *h)
{
        return h->ring[h->rtt_sample_prev];
}

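/*
 * Background for the sampling code below (based on RFC 4342, 8.1): the
 * sender increments its window counter, CCVal, every quarter RTT.  A CCVal
 * delta of exactly 4 between the reference entry and the current packet
 * thus means their send times are one RTT apart, so the local time elapsed
 * since the reference arrived is itself the RTT sample.  Worked example:
 * reference CCVal = 9 and current CCVal = 13 give delta_v = 4; if the
 * reference arrived 30 ms ago, the sample is roughly 30000 microseconds.
 * For a stored suboptimal candidate with CCVal delta 2, the time delta is
 * scaled by 4/2 = 2 instead.
 */
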
/**
 * tfrc_rx_hist_sample_rtt - Sample RTT from timestamp / CCVal
 * @h: receive histogram
 * @skb: packet containing timestamp.
 *
 * Based on ideas presented in RFC 4342, 8.1. Returns 0 if it was not able
 * to compute a sample with given data - calling function should check this.
 */
u32 tfrc_rx_hist_sample_rtt(struct tfrc_rx_hist *h, const struct sk_buff *skb)
{
        u32 sample = 0,
            delta_v = SUB16(dccp_hdr(skb)->dccph_ccval,
                            tfrc_rx_hist_rtt_last_s(h)->tfrchrx_ccval);

        if (delta_v < 1 || delta_v > 4) {       /* unsuitable CCVal delta */
                if (h->rtt_sample_prev == 2) {  /* previous candidate stored */
                        sample = SUB16(tfrc_rx_hist_rtt_prev_s(h)->tfrchrx_ccval,
                                       tfrc_rx_hist_rtt_last_s(h)->tfrchrx_ccval);
                        if (sample)
                                sample = 4 / sample *
                                        ktime_us_delta(tfrc_rx_hist_rtt_prev_s(h)->tfrchrx_tstamp,
                                                       tfrc_rx_hist_rtt_last_s(h)->tfrchrx_tstamp);
                        else    /*
                                 * FIXME: This condition is in principle not
                                 * possible but occurs when CCID is used for
                                 * two-way data traffic. I have tried to trace
                                 * it, but the cause does not seem to be here.
                                 */
                                DCCP_BUG("please report to dccp@vger.kernel.org"
                                         " => prev = %u, last = %u",
                                         tfrc_rx_hist_rtt_prev_s(h)->tfrchrx_ccval,
                                         tfrc_rx_hist_rtt_last_s(h)->tfrchrx_ccval);
                } else if (delta_v < 1) {
                        h->rtt_sample_prev = 1;
                        goto keep_ref_for_next_time;
                }

        } else if (delta_v == 4) /* optimal match */
                sample = ktime_to_us(net_timedelta(tfrc_rx_hist_rtt_last_s(h)->tfrchrx_tstamp));
        else {                   /* suboptimal match */
                h->rtt_sample_prev = 2;
                goto keep_ref_for_next_time;
        }

        if (unlikely(sample > DCCP_SANE_RTT_MAX)) {
                DCCP_WARN("RTT sample %u too large, using max\n", sample);
                sample = DCCP_SANE_RTT_MAX;
        }

        h->rtt_sample_prev = 0; /* use current entry as next reference */
keep_ref_for_next_time:

        return sample;
}