v5.9
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 *  Copyright (c) 2005, 2006 Andrea Bittau <a.bittau@cs.ucl.ac.uk>
  4 *
  5 *  Changes to meet Linux coding standards, and DCCP infrastructure fixes.
  6 *
  7 *  Copyright (c) 2006 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
  8 */
  9
 10/*
 11 * This implementation should follow RFC 4341
 12 */
 13#include <linux/slab.h>
 14#include "../feat.h"
 15#include "ccid2.h"
 16
 17
 18#ifdef CONFIG_IP_DCCP_CCID2_DEBUG
 19static bool ccid2_debug;
 20#define ccid2_pr_debug(format, a...)	DCCP_PR_DEBUG(ccid2_debug, format, ##a)
 21#else
 22#define ccid2_pr_debug(format, a...)
 23#endif
 24
 25static int ccid2_hc_tx_alloc_seq(struct ccid2_hc_tx_sock *hc)
 26{
 27	struct ccid2_seq *seqp;
 28	int i;
 29
 30	/* check if we have space to preserve the pointer to the buffer */
 31	if (hc->tx_seqbufc >= (sizeof(hc->tx_seqbuf) /
 32			       sizeof(struct ccid2_seq *)))
 33		return -ENOMEM;
 34
 35	/* allocate buffer and initialize linked list */
 36	seqp = kmalloc_array(CCID2_SEQBUF_LEN, sizeof(struct ccid2_seq),
 37			     gfp_any());
 38	if (seqp == NULL)
 39		return -ENOMEM;
 40
 41	for (i = 0; i < (CCID2_SEQBUF_LEN - 1); i++) {
 42		seqp[i].ccid2s_next = &seqp[i + 1];
 43		seqp[i + 1].ccid2s_prev = &seqp[i];
 44	}
 45	seqp[CCID2_SEQBUF_LEN - 1].ccid2s_next = seqp;
 46	seqp->ccid2s_prev = &seqp[CCID2_SEQBUF_LEN - 1];
 47
  48	/* This is the first allocation.  Initialise the head and tail.  */
 49	if (hc->tx_seqbufc == 0)
 50		hc->tx_seqh = hc->tx_seqt = seqp;
 51	else {
 52		/* link the existing list with the one we just created */
 53		hc->tx_seqh->ccid2s_next = seqp;
 54		seqp->ccid2s_prev = hc->tx_seqh;
 55
 56		hc->tx_seqt->ccid2s_prev = &seqp[CCID2_SEQBUF_LEN - 1];
 57		seqp[CCID2_SEQBUF_LEN - 1].ccid2s_next = hc->tx_seqt;
 58	}
 59
 60	/* store the original pointer to the buffer so we can free it */
 61	hc->tx_seqbuf[hc->tx_seqbufc] = seqp;
 62	hc->tx_seqbufc++;
 63
 64	return 0;
 65}
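/*
 * Buffer-growth sketch (illustration only): each successful call above adds
 * one block of CCID2_SEQBUF_LEN entries. The first block forms a circular
 * list on its own; every later block is spliced in between the current head
 * (tx_seqh) and tail (tx_seqt), so the head gains CCID2_SEQBUF_LEN free
 * slots before it could wrap around into unacknowledged history.
 */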
 66
 67static int ccid2_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
 68{
 69	if (ccid2_cwnd_network_limited(ccid2_hc_tx_sk(sk)))
 70		return CCID_PACKET_WILL_DEQUEUE_LATER;
 71	return CCID_PACKET_SEND_AT_ONCE;
 72}
 73
 74static void ccid2_change_l_ack_ratio(struct sock *sk, u32 val)
 75{
 76	u32 max_ratio = DIV_ROUND_UP(ccid2_hc_tx_sk(sk)->tx_cwnd, 2);
 77
 78	/*
 79	 * Ensure that Ack Ratio does not exceed ceil(cwnd/2), which is (2) from
 80	 * RFC 4341, 6.1.2. We ignore the statement that Ack Ratio 2 is always
 81	 * acceptable since this causes starvation/deadlock whenever cwnd < 2.
  82	 * The same problem arises when Ack Ratio is 0 (i.e. Ack Ratio disabled).
 83	 */
 84	if (val == 0 || val > max_ratio) {
 85		DCCP_WARN("Limiting Ack Ratio (%u) to %u\n", val, max_ratio);
 86		val = max_ratio;
 87	}
 88	dccp_feat_signal_nn_change(sk, DCCPF_ACK_RATIO,
 89				   min_t(u32, val, DCCPF_ACK_RATIO_MAX));
 90}
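/*
 * Clamping example (numbers for illustration only): with tx_cwnd = 5,
 * max_ratio = DIV_ROUND_UP(5, 2) = 3, so a requested Ack Ratio of 7 (or the
 * "disabled" value 0) is limited to 3 before being signalled, and the
 * signalled value is additionally capped at DCCPF_ACK_RATIO_MAX.
 */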
 91
 92static void ccid2_check_l_ack_ratio(struct sock *sk)
 93{
 94	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
 95
 96	/*
 97	 * After a loss, idle period, application limited period, or RTO we
 98	 * need to check that the ack ratio is still less than the congestion
 99	 * window. Otherwise, we will send an entire congestion window of
 100	 * packets and get no response because we haven't sent ack ratio
101	 * packets yet.
102	 * If the ack ratio does need to be reduced, we reduce it to half of
103	 * the congestion window (or 1 if that's zero) instead of to the
104	 * congestion window. This prevents problems if one ack is lost.
105	 */
106	if (dccp_feat_nn_get(sk, DCCPF_ACK_RATIO) > hc->tx_cwnd)
107		ccid2_change_l_ack_ratio(sk, hc->tx_cwnd/2 ? : 1U);
108}
109
110static void ccid2_change_l_seq_window(struct sock *sk, u64 val)
111{
112	dccp_feat_signal_nn_change(sk, DCCPF_SEQUENCE_WINDOW,
113				   clamp_val(val, DCCPF_SEQ_WMIN,
114						  DCCPF_SEQ_WMAX));
115}
116
117static void dccp_tasklet_schedule(struct sock *sk)
118{
119	struct tasklet_struct *t = &dccp_sk(sk)->dccps_xmitlet;
120
121	if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
122		sock_hold(sk);
123		__tasklet_schedule(t);
124	}
125}
126
127static void ccid2_hc_tx_rto_expire(struct timer_list *t)
128{
129	struct ccid2_hc_tx_sock *hc = from_timer(hc, t, tx_rtotimer);
130	struct sock *sk = hc->sk;
131	const bool sender_was_blocked = ccid2_cwnd_network_limited(hc);
132
133	bh_lock_sock(sk);
134	if (sock_owned_by_user(sk)) {
135		sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + HZ / 5);
136		goto out;
137	}
138
139	ccid2_pr_debug("RTO_EXPIRE\n");
140
141	if (sk->sk_state == DCCP_CLOSED)
142		goto out;
143
144	/* back-off timer */
145	hc->tx_rto <<= 1;
146	if (hc->tx_rto > DCCP_RTO_MAX)
147		hc->tx_rto = DCCP_RTO_MAX;
148
149	/* adjust pipe, cwnd etc */
150	hc->tx_ssthresh = hc->tx_cwnd / 2;
151	if (hc->tx_ssthresh < 2)
152		hc->tx_ssthresh = 2;
153	hc->tx_cwnd	= 1;
154	hc->tx_pipe	= 0;
155
156	/* clear state about stuff we sent */
157	hc->tx_seqt = hc->tx_seqh;
158	hc->tx_packets_acked = 0;
159
160	/* clear ack ratio state. */
161	hc->tx_rpseq    = 0;
162	hc->tx_rpdupack = -1;
163	ccid2_change_l_ack_ratio(sk, 1);
164
165	/* if we were blocked before, we may now send cwnd=1 packet */
166	if (sender_was_blocked)
167		dccp_tasklet_schedule(sk);
168	/* restart backed-off timer */
169	sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
170out:
171	bh_unlock_sock(sk);
172	sock_put(sk);
173}
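/*
 * Backoff example (illustration only, assuming the RTO has settled at
 * 200ms): consecutive expiries double the timer to 400ms, 800ms, 1.6s, ...
 * until it is capped at DCCP_RTO_MAX (64s), while cwnd is reset to 1 and
 * pipe to 0 so that transmission restarts in slow start.
 */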
174
175/*
176 *	Congestion window validation (RFC 2861).
177 */
178static bool ccid2_do_cwv = true;
179module_param(ccid2_do_cwv, bool, 0644);
180MODULE_PARM_DESC(ccid2_do_cwv, "Perform RFC2861 Congestion Window Validation");
181
182/**
183 * ccid2_update_used_window  -  Track how much of cwnd is actually used
184 * This is done in addition to CWV. The sender needs to have an idea of how many
185 * packets may be in flight, to set the local Sequence Window value accordingly
186 * (RFC 4340, 7.5.2). The CWV mechanism is exploited to keep track of the
187 * maximum-used window. We use an EWMA low-pass filter to filter out noise.
188 */
189static void ccid2_update_used_window(struct ccid2_hc_tx_sock *hc, u32 new_wnd)
190{
191	hc->tx_expected_wnd = (3 * hc->tx_expected_wnd + new_wnd) / 4;
192}
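/*
 * EWMA example (illustration only): with tx_expected_wnd = 16 and a new
 * sample new_wnd = 4 the filter yields (3 * 16 + 4) / 4 = 13, i.e. each
 * update moves the estimate one quarter of the way towards the sample.
 */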
193
194/* This borrows the code of tcp_cwnd_application_limited() */
195static void ccid2_cwnd_application_limited(struct sock *sk, const u32 now)
196{
197	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
198	/* don't reduce cwnd below the initial window (IW) */
199	u32 init_win = rfc3390_bytes_to_packets(dccp_sk(sk)->dccps_mss_cache),
200	    win_used = max(hc->tx_cwnd_used, init_win);
201
202	if (win_used < hc->tx_cwnd) {
203		hc->tx_ssthresh = max(hc->tx_ssthresh,
204				     (hc->tx_cwnd >> 1) + (hc->tx_cwnd >> 2));
205		hc->tx_cwnd = (hc->tx_cwnd + win_used) >> 1;
206	}
207	hc->tx_cwnd_used  = 0;
208	hc->tx_cwnd_stamp = now;
209
210	ccid2_check_l_ack_ratio(sk);
211}
212
213/* This borrows the code of tcp_cwnd_restart() */
214static void ccid2_cwnd_restart(struct sock *sk, const u32 now)
215{
216	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
217	u32 cwnd = hc->tx_cwnd, restart_cwnd,
218	    iwnd = rfc3390_bytes_to_packets(dccp_sk(sk)->dccps_mss_cache);
219	s32 delta = now - hc->tx_lsndtime;
220
221	hc->tx_ssthresh = max(hc->tx_ssthresh, (cwnd >> 1) + (cwnd >> 2));
222
223	/* don't reduce cwnd below the initial window (IW) */
224	restart_cwnd = min(cwnd, iwnd);
225
226	while ((delta -= hc->tx_rto) >= 0 && cwnd > restart_cwnd)
227		cwnd >>= 1;
 228	hc->tx_cwnd = max(cwnd, restart_cwnd);
 229	hc->tx_cwnd_stamp = now;
230	hc->tx_cwnd_used  = 0;
231
232	ccid2_check_l_ack_ratio(sk);
233}
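/*
 * Restart example (illustration only): with cwnd = 16, iwnd = 4 and an idle
 * time of just over three RTOs, the loop halves cwnd twice (16 -> 8 -> 4)
 * and then stops because cwnd has reached restart_cwnd, so sending resumes
 * from the initial window rather than the stale pre-idle window.
 */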
234
235static void ccid2_hc_tx_packet_sent(struct sock *sk, unsigned int len)
236{
237	struct dccp_sock *dp = dccp_sk(sk);
238	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
239	const u32 now = ccid2_jiffies32;
240	struct ccid2_seq *next;
241
242	/* slow-start after idle periods (RFC 2581, RFC 2861) */
243	if (ccid2_do_cwv && !hc->tx_pipe &&
244	    (s32)(now - hc->tx_lsndtime) >= hc->tx_rto)
245		ccid2_cwnd_restart(sk, now);
246
247	hc->tx_lsndtime = now;
248	hc->tx_pipe    += 1;
249
250	/* see whether cwnd was fully used (RFC 2861), update expected window */
251	if (ccid2_cwnd_network_limited(hc)) {
252		ccid2_update_used_window(hc, hc->tx_cwnd);
253		hc->tx_cwnd_used  = 0;
254		hc->tx_cwnd_stamp = now;
255	} else {
256		if (hc->tx_pipe > hc->tx_cwnd_used)
257			hc->tx_cwnd_used = hc->tx_pipe;
258
259		ccid2_update_used_window(hc, hc->tx_cwnd_used);
260
261		if (ccid2_do_cwv && (s32)(now - hc->tx_cwnd_stamp) >= hc->tx_rto)
262			ccid2_cwnd_application_limited(sk, now);
263	}
264
265	hc->tx_seqh->ccid2s_seq   = dp->dccps_gss;
266	hc->tx_seqh->ccid2s_acked = 0;
267	hc->tx_seqh->ccid2s_sent  = now;
268
269	next = hc->tx_seqh->ccid2s_next;
270	/* check if we need to alloc more space */
271	if (next == hc->tx_seqt) {
272		if (ccid2_hc_tx_alloc_seq(hc)) {
273			DCCP_CRIT("packet history - out of memory!");
274			/* FIXME: find a more graceful way to bail out */
275			return;
276		}
277		next = hc->tx_seqh->ccid2s_next;
278		BUG_ON(next == hc->tx_seqt);
279	}
280	hc->tx_seqh = next;
281
282	ccid2_pr_debug("cwnd=%d pipe=%d\n", hc->tx_cwnd, hc->tx_pipe);
283
284	/*
285	 * FIXME: The code below is broken and the variables have been removed
286	 * from the socket struct. The `ackloss' variable was always set to 0,
287	 * and with arsent there are several problems:
288	 *  (i) it doesn't just count the number of Acks, but all sent packets;
289	 *  (ii) it is expressed in # of packets, not # of windows, so the
290	 *  comparison below uses the wrong formula: Appendix A of RFC 4341
291	 *  comes up with the number K = cwnd / (R^2 - R) of consecutive windows
292	 *  of data with no lost or marked Ack packets. If arsent were the # of
293	 *  consecutive Acks received without loss, then Ack Ratio needs to be
294	 *  decreased by 1 when
295	 *	      arsent >=  K * cwnd / R  =  cwnd^2 / (R^3 - R^2)
296	 *  where cwnd / R is the number of Acks received per window of data
297	 *  (cf. RFC 4341, App. A). The problems are that
298	 *  - arsent counts other packets as well;
299	 *  - the comparison uses a formula different from RFC 4341;
300	 *  - computing a cubic/quadratic equation each time is too complicated.
301	 *  Hence a different algorithm is needed.
302	 */
303#if 0
304	/* Ack Ratio.  Need to maintain a concept of how many windows we sent */
305	hc->tx_arsent++;
306	/* We had an ack loss in this window... */
307	if (hc->tx_ackloss) {
308		if (hc->tx_arsent >= hc->tx_cwnd) {
309			hc->tx_arsent  = 0;
310			hc->tx_ackloss = 0;
311		}
312	} else {
313		/* No acks lost up to now... */
314		/* decrease ack ratio if enough packets were sent */
315		if (dp->dccps_l_ack_ratio > 1) {
316			/* XXX don't calculate denominator each time */
317			int denom = dp->dccps_l_ack_ratio * dp->dccps_l_ack_ratio -
318				    dp->dccps_l_ack_ratio;
319
320			denom = hc->tx_cwnd * hc->tx_cwnd / denom;
321
322			if (hc->tx_arsent >= denom) {
323				ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio - 1);
324				hc->tx_arsent = 0;
325			}
326		} else {
327			/* we can't increase ack ratio further [1] */
328			hc->tx_arsent = 0; /* or maybe set it to cwnd*/
329		}
330	}
331#endif
332
333	sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
334
335#ifdef CONFIG_IP_DCCP_CCID2_DEBUG
336	do {
337		struct ccid2_seq *seqp = hc->tx_seqt;
338
339		while (seqp != hc->tx_seqh) {
340			ccid2_pr_debug("out seq=%llu acked=%d time=%u\n",
341				       (unsigned long long)seqp->ccid2s_seq,
342				       seqp->ccid2s_acked, seqp->ccid2s_sent);
343			seqp = seqp->ccid2s_next;
344		}
345	} while (0);
346	ccid2_pr_debug("=========\n");
347#endif
348}
349
350/**
351 * ccid2_rtt_estimator - Sample RTT and compute RTO using RFC2988 algorithm
 352 * This code is almost identical to TCP's tcp_rtt_estimator(), since
353 * - it has a higher sampling frequency (recommended by RFC 1323),
354 * - the RTO does not collapse into RTT due to RTTVAR going towards zero,
355 * - it is simple (cf. more complex proposals such as Eifel timer or research
356 *   which suggests that the gain should be set according to window size),
357 * - in tests it was found to work well with CCID2 [gerrit].
358 */
359static void ccid2_rtt_estimator(struct sock *sk, const long mrtt)
360{
361	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
362	long m = mrtt ? : 1;
363
364	if (hc->tx_srtt == 0) {
365		/* First measurement m */
366		hc->tx_srtt = m << 3;
367		hc->tx_mdev = m << 1;
368
369		hc->tx_mdev_max = max(hc->tx_mdev, tcp_rto_min(sk));
370		hc->tx_rttvar   = hc->tx_mdev_max;
371
372		hc->tx_rtt_seq  = dccp_sk(sk)->dccps_gss;
373	} else {
374		/* Update scaled SRTT as SRTT += 1/8 * (m - SRTT) */
375		m -= (hc->tx_srtt >> 3);
376		hc->tx_srtt += m;
377
378		/* Similarly, update scaled mdev with regard to |m| */
379		if (m < 0) {
380			m = -m;
381			m -= (hc->tx_mdev >> 2);
382			/*
383			 * This neutralises RTO increase when RTT < SRTT - mdev
384			 * (see P. Sarolahti, A. Kuznetsov,"Congestion Control
385			 * in Linux TCP", USENIX 2002, pp. 49-62).
386			 */
387			if (m > 0)
388				m >>= 3;
389		} else {
390			m -= (hc->tx_mdev >> 2);
391		}
392		hc->tx_mdev += m;
393
394		if (hc->tx_mdev > hc->tx_mdev_max) {
395			hc->tx_mdev_max = hc->tx_mdev;
396			if (hc->tx_mdev_max > hc->tx_rttvar)
397				hc->tx_rttvar = hc->tx_mdev_max;
398		}
399
400		/*
401		 * Decay RTTVAR at most once per flight, exploiting that
402		 *  1) pipe <= cwnd <= Sequence_Window = W  (RFC 4340, 7.5.2)
403		 *  2) AWL = GSS-W+1 <= GAR <= GSS          (RFC 4340, 7.5.1)
404		 * GAR is a useful bound for FlightSize = pipe.
405		 * AWL is probably too low here, as it over-estimates pipe.
406		 */
407		if (after48(dccp_sk(sk)->dccps_gar, hc->tx_rtt_seq)) {
408			if (hc->tx_mdev_max < hc->tx_rttvar)
409				hc->tx_rttvar -= (hc->tx_rttvar -
410						  hc->tx_mdev_max) >> 2;
411			hc->tx_rtt_seq  = dccp_sk(sk)->dccps_gss;
412			hc->tx_mdev_max = tcp_rto_min(sk);
413		}
414	}
415
416	/*
417	 * Set RTO from SRTT and RTTVAR
418	 * As in TCP, 4 * RTTVAR >= TCP_RTO_MIN, giving a minimum RTO of 200 ms.
419	 * This agrees with RFC 4341, 5:
420	 *	"Because DCCP does not retransmit data, DCCP does not require
421	 *	 TCP's recommended minimum timeout of one second".
422	 */
423	hc->tx_rto = (hc->tx_srtt >> 3) + hc->tx_rttvar;
424
425	if (hc->tx_rto > DCCP_RTO_MAX)
426		hc->tx_rto = DCCP_RTO_MAX;
427}
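/*
 * Worked example (illustration only, in jiffies, assuming HZ = 1000 and the
 * usual 200ms tcp_rto_min()): a first sample m = 100 gives srtt = 800
 * (scaled by 8), mdev = 200 and rttvar = mdev_max = 200, so
 * tx_rto = (800 >> 3) + 200 = 300 jiffies, i.e. 300ms. Later samples then
 * move srtt by 1/8 and mdev by 1/4 of the measured deviation per update.
 */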
428
429static void ccid2_new_ack(struct sock *sk, struct ccid2_seq *seqp,
430			  unsigned int *maxincr)
431{
432	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
433	struct dccp_sock *dp = dccp_sk(sk);
434	int r_seq_used = hc->tx_cwnd / dp->dccps_l_ack_ratio;
435
436	if (hc->tx_cwnd < dp->dccps_l_seq_win &&
437	    r_seq_used < dp->dccps_r_seq_win) {
438		if (hc->tx_cwnd < hc->tx_ssthresh) {
439			if (*maxincr > 0 && ++hc->tx_packets_acked >= 2) {
440				hc->tx_cwnd += 1;
441				*maxincr    -= 1;
442				hc->tx_packets_acked = 0;
443			}
444		} else if (++hc->tx_packets_acked >= hc->tx_cwnd) {
445			hc->tx_cwnd += 1;
446			hc->tx_packets_acked = 0;
447		}
448	}
449
450	/*
451	 * Adjust the local sequence window and the ack ratio to allow about
452	 * 5 times the number of packets in the network (RFC 4340 7.5.2)
453	 */
454	if (r_seq_used * CCID2_WIN_CHANGE_FACTOR >= dp->dccps_r_seq_win)
455		ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio * 2);
456	else if (r_seq_used * CCID2_WIN_CHANGE_FACTOR < dp->dccps_r_seq_win/2)
457		ccid2_change_l_ack_ratio(sk, dp->dccps_l_ack_ratio / 2 ? : 1U);
458
459	if (hc->tx_cwnd * CCID2_WIN_CHANGE_FACTOR >= dp->dccps_l_seq_win)
460		ccid2_change_l_seq_window(sk, dp->dccps_l_seq_win * 2);
461	else if (hc->tx_cwnd * CCID2_WIN_CHANGE_FACTOR < dp->dccps_l_seq_win/2)
462		ccid2_change_l_seq_window(sk, dp->dccps_l_seq_win / 2);
463
464	/*
465	 * FIXME: RTT is sampled several times per acknowledgment (for each
466	 * entry in the Ack Vector), instead of once per Ack (as in TCP SACK).
467	 * This causes the RTT to be over-estimated, since the older entries
468	 * in the Ack Vector have earlier sending times.
469	 * The cleanest solution is to not use the ccid2s_sent field at all
470	 * and instead use DCCP timestamps: requires changes in other places.
471	 */
472	ccid2_rtt_estimator(sk, ccid2_jiffies32 - seqp->ccid2s_sent);
473}
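/*
 * Adjustment example (illustration only, taking CCID2_WIN_CHANGE_FACTOR to
 * be the factor of 5 mentioned above): with tx_cwnd = 20 and a local Ack
 * Ratio of 2, r_seq_used = 10. If the remote Sequence Window is 40, then
 * 10 * 5 >= 40 and the local Ack Ratio is doubled to 4; conversely, once
 * the product falls below half the remote window it is halved again (but
 * never below 1).
 */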
474
475static void ccid2_congestion_event(struct sock *sk, struct ccid2_seq *seqp)
476{
477	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
478
479	if ((s32)(seqp->ccid2s_sent - hc->tx_last_cong) < 0) {
480		ccid2_pr_debug("Multiple losses in an RTT---treating as one\n");
481		return;
482	}
483
484	hc->tx_last_cong = ccid2_jiffies32;
485
486	hc->tx_cwnd      = hc->tx_cwnd / 2 ? : 1U;
487	hc->tx_ssthresh  = max(hc->tx_cwnd, 2U);
488
489	ccid2_check_l_ack_ratio(sk);
490}
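/*
 * Reaction example (illustration only): a loss or ECN mark seen with
 * tx_cwnd = 10 halves the window to 5 and sets ssthresh = max(5, 2) = 5;
 * the tx_last_cong timestamp check above makes several losses within one
 * RTT count as a single congestion event, as in TCP.
 */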
491
492static int ccid2_hc_tx_parse_options(struct sock *sk, u8 packet_type,
493				     u8 option, u8 *optval, u8 optlen)
494{
495	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
496
497	switch (option) {
498	case DCCPO_ACK_VECTOR_0:
499	case DCCPO_ACK_VECTOR_1:
500		return dccp_ackvec_parsed_add(&hc->tx_av_chunks, optval, optlen,
501					      option - DCCPO_ACK_VECTOR_0);
502	}
503	return 0;
504}
505
506static void ccid2_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb)
507{
508	struct dccp_sock *dp = dccp_sk(sk);
509	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
510	const bool sender_was_blocked = ccid2_cwnd_network_limited(hc);
511	struct dccp_ackvec_parsed *avp;
512	u64 ackno, seqno;
513	struct ccid2_seq *seqp;
514	int done = 0;
515	unsigned int maxincr = 0;
516
517	/* check reverse path congestion */
518	seqno = DCCP_SKB_CB(skb)->dccpd_seq;
519
520	/* XXX this whole "algorithm" is broken.  Need to fix it to keep track
521	 * of the seqnos of the dupacks so that rpseq and rpdupack are correct
522	 * -sorbo.
523	 */
524	/* need to bootstrap */
525	if (hc->tx_rpdupack == -1) {
526		hc->tx_rpdupack = 0;
527		hc->tx_rpseq    = seqno;
528	} else {
529		/* check if packet is consecutive */
530		if (dccp_delta_seqno(hc->tx_rpseq, seqno) == 1)
531			hc->tx_rpseq = seqno;
532		/* it's a later packet */
533		else if (after48(seqno, hc->tx_rpseq)) {
534			hc->tx_rpdupack++;
535
536			/* check if we got enough dupacks */
537			if (hc->tx_rpdupack >= NUMDUPACK) {
538				hc->tx_rpdupack = -1; /* XXX lame */
539				hc->tx_rpseq    = 0;
540#ifdef __CCID2_COPES_GRACEFULLY_WITH_ACK_CONGESTION_CONTROL__
541				/*
542				 * FIXME: Ack Congestion Control is broken; in
543				 * the current state instabilities occurred with
544				 * Ack Ratios greater than 1; causing hang-ups
545				 * and long RTO timeouts. This needs to be fixed
546				 * before opening up dynamic changes. -- gerrit
547				 */
548				ccid2_change_l_ack_ratio(sk, 2 * dp->dccps_l_ack_ratio);
549#endif
550			}
551		}
552	}
553
554	/* check forward path congestion */
555	if (dccp_packet_without_ack(skb))
556		return;
557
558	/* still didn't send out new data packets */
559	if (hc->tx_seqh == hc->tx_seqt)
560		goto done;
561
562	ackno = DCCP_SKB_CB(skb)->dccpd_ack_seq;
563	if (after48(ackno, hc->tx_high_ack))
564		hc->tx_high_ack = ackno;
565
566	seqp = hc->tx_seqt;
567	while (before48(seqp->ccid2s_seq, ackno)) {
568		seqp = seqp->ccid2s_next;
569		if (seqp == hc->tx_seqh) {
570			seqp = hc->tx_seqh->ccid2s_prev;
571			break;
572		}
573	}
574
575	/*
576	 * In slow-start, cwnd can increase up to a maximum of Ack Ratio/2
 577	 * packets per acknowledgement. Rounding up ensures that cwnd still
 578	 * advances when Ack Ratio is 1, and gives a slight edge otherwise.
579	 */
580	if (hc->tx_cwnd < hc->tx_ssthresh)
581		maxincr = DIV_ROUND_UP(dp->dccps_l_ack_ratio, 2);
582
583	/* go through all ack vectors */
584	list_for_each_entry(avp, &hc->tx_av_chunks, node) {
585		/* go through this ack vector */
586		for (; avp->len--; avp->vec++) {
587			u64 ackno_end_rl = SUB48(ackno,
588						 dccp_ackvec_runlen(avp->vec));
589
590			ccid2_pr_debug("ackvec %llu |%u,%u|\n",
591				       (unsigned long long)ackno,
592				       dccp_ackvec_state(avp->vec) >> 6,
593				       dccp_ackvec_runlen(avp->vec));
594			/* if the seqno we are analyzing is larger than the
595			 * current ackno, then move towards the tail of our
596			 * seqnos.
597			 */
598			while (after48(seqp->ccid2s_seq, ackno)) {
599				if (seqp == hc->tx_seqt) {
600					done = 1;
601					break;
602				}
603				seqp = seqp->ccid2s_prev;
604			}
605			if (done)
606				break;
607
608			/* check all seqnos in the range of the vector
609			 * run length
610			 */
 611			while (between48(seqp->ccid2s_seq, ackno_end_rl, ackno)) {
612				const u8 state = dccp_ackvec_state(avp->vec);
613
614				/* new packet received or marked */
615				if (state != DCCPAV_NOT_RECEIVED &&
616				    !seqp->ccid2s_acked) {
617					if (state == DCCPAV_ECN_MARKED)
618						ccid2_congestion_event(sk,
619								       seqp);
620					else
621						ccid2_new_ack(sk, seqp,
622							      &maxincr);
623
624					seqp->ccid2s_acked = 1;
625					ccid2_pr_debug("Got ack for %llu\n",
626						       (unsigned long long)seqp->ccid2s_seq);
627					hc->tx_pipe--;
628				}
629				if (seqp == hc->tx_seqt) {
630					done = 1;
631					break;
632				}
633				seqp = seqp->ccid2s_prev;
634			}
635			if (done)
636				break;
637
638			ackno = SUB48(ackno_end_rl, 1);
639		}
640		if (done)
641			break;
642	}
643
644	/* The state about what is acked should be correct now
645	 * Check for NUMDUPACK
646	 */
647	seqp = hc->tx_seqt;
648	while (before48(seqp->ccid2s_seq, hc->tx_high_ack)) {
649		seqp = seqp->ccid2s_next;
650		if (seqp == hc->tx_seqh) {
651			seqp = hc->tx_seqh->ccid2s_prev;
652			break;
653		}
654	}
655	done = 0;
656	while (1) {
657		if (seqp->ccid2s_acked) {
658			done++;
659			if (done == NUMDUPACK)
660				break;
661		}
662		if (seqp == hc->tx_seqt)
663			break;
664		seqp = seqp->ccid2s_prev;
665	}
666
667	/* If there are at least 3 acknowledgements, anything unacknowledged
668	 * below the last sequence number is considered lost
669	 */
670	if (done == NUMDUPACK) {
671		struct ccid2_seq *last_acked = seqp;
672
673		/* check for lost packets */
674		while (1) {
675			if (!seqp->ccid2s_acked) {
676				ccid2_pr_debug("Packet lost: %llu\n",
677					       (unsigned long long)seqp->ccid2s_seq);
678				/* XXX need to traverse from tail -> head in
679				 * order to detect multiple congestion events in
680				 * one ack vector.
681				 */
682				ccid2_congestion_event(sk, seqp);
683				hc->tx_pipe--;
684			}
685			if (seqp == hc->tx_seqt)
686				break;
687			seqp = seqp->ccid2s_prev;
688		}
689
690		hc->tx_seqt = last_acked;
691	}
692
693	/* trim acked packets in tail */
694	while (hc->tx_seqt != hc->tx_seqh) {
695		if (!hc->tx_seqt->ccid2s_acked)
696			break;
697
698		hc->tx_seqt = hc->tx_seqt->ccid2s_next;
699	}
700
701	/* restart RTO timer if not all outstanding data has been acked */
702	if (hc->tx_pipe == 0)
703		sk_stop_timer(sk, &hc->tx_rtotimer);
704	else
705		sk_reset_timer(sk, &hc->tx_rtotimer, jiffies + hc->tx_rto);
706done:
707	/* check if incoming Acks allow pending packets to be sent */
708	if (sender_was_blocked && !ccid2_cwnd_network_limited(hc))
709		dccp_tasklet_schedule(sk);
710	dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
711}
712
713static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
714{
715	struct ccid2_hc_tx_sock *hc = ccid_priv(ccid);
716	struct dccp_sock *dp = dccp_sk(sk);
717	u32 max_ratio;
718
719	/* RFC 4341, 5: initialise ssthresh to arbitrarily high (max) value */
720	hc->tx_ssthresh = ~0U;
721
722	/* Use larger initial windows (RFC 4341, section 5). */
723	hc->tx_cwnd = rfc3390_bytes_to_packets(dp->dccps_mss_cache);
724	hc->tx_expected_wnd = hc->tx_cwnd;
725
726	/* Make sure that Ack Ratio is enabled and within bounds. */
727	max_ratio = DIV_ROUND_UP(hc->tx_cwnd, 2);
728	if (dp->dccps_l_ack_ratio == 0 || dp->dccps_l_ack_ratio > max_ratio)
729		dp->dccps_l_ack_ratio = max_ratio;
730
731	/* XXX init ~ to window size... */
732	if (ccid2_hc_tx_alloc_seq(hc))
733		return -ENOMEM;
734
735	hc->tx_rto	 = DCCP_TIMEOUT_INIT;
736	hc->tx_rpdupack  = -1;
737	hc->tx_last_cong = hc->tx_lsndtime = hc->tx_cwnd_stamp = ccid2_jiffies32;
738	hc->tx_cwnd_used = 0;
739	hc->sk		 = sk;
740	timer_setup(&hc->tx_rtotimer, ccid2_hc_tx_rto_expire, 0);
741	INIT_LIST_HEAD(&hc->tx_av_chunks);
742	return 0;
743}
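/*
 * Initialisation example (illustration only): per RFC 3390 an MSS of 1460
 * bytes maps to an initial tx_cwnd of 3 packets, so max_ratio is
 * DIV_ROUND_UP(3, 2) = 2 and an unset or oversized Ack Ratio is brought
 * back to 2 before the first packet is sent.
 */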
744
745static void ccid2_hc_tx_exit(struct sock *sk)
746{
747	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
748	int i;
749
750	sk_stop_timer(sk, &hc->tx_rtotimer);
751
752	for (i = 0; i < hc->tx_seqbufc; i++)
753		kfree(hc->tx_seqbuf[i]);
754	hc->tx_seqbufc = 0;
755	dccp_ackvec_parsed_cleanup(&hc->tx_av_chunks);
756}
757
758static void ccid2_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb)
759{
760	struct ccid2_hc_rx_sock *hc = ccid2_hc_rx_sk(sk);
761
762	if (!dccp_data_packet(skb))
763		return;
764
765	if (++hc->rx_num_data_pkts >= dccp_sk(sk)->dccps_r_ack_ratio) {
766		dccp_send_ack(sk);
767		hc->rx_num_data_pkts = 0;
768	}
769}
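/*
 * Receiver-side example (illustration only): with a remote Ack Ratio of 4,
 * the counter above fires dccp_send_ack() on every fourth data packet and
 * is then reset, so a single Ack (carrying an Ack Vector) covers up to four
 * data packets.
 */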
770
771struct ccid_operations ccid2_ops = {
772	.ccid_id		  = DCCPC_CCID2,
773	.ccid_name		  = "TCP-like",
774	.ccid_hc_tx_obj_size	  = sizeof(struct ccid2_hc_tx_sock),
775	.ccid_hc_tx_init	  = ccid2_hc_tx_init,
776	.ccid_hc_tx_exit	  = ccid2_hc_tx_exit,
777	.ccid_hc_tx_send_packet	  = ccid2_hc_tx_send_packet,
778	.ccid_hc_tx_packet_sent	  = ccid2_hc_tx_packet_sent,
779	.ccid_hc_tx_parse_options = ccid2_hc_tx_parse_options,
780	.ccid_hc_tx_packet_recv	  = ccid2_hc_tx_packet_recv,
781	.ccid_hc_rx_obj_size	  = sizeof(struct ccid2_hc_rx_sock),
782	.ccid_hc_rx_packet_recv	  = ccid2_hc_rx_packet_recv,
783};
784
785#ifdef CONFIG_IP_DCCP_CCID2_DEBUG
786module_param(ccid2_debug, bool, 0644);
787MODULE_PARM_DESC(ccid2_debug, "Enable CCID-2 debug messages");
788#endif