/*
 * TCP Westwood+: end-to-end bandwidth estimation for TCP
 *
 * Angelo Dell'Aera: author of the first version of TCP Westwood+ in Linux 2.4
 *
 * Support at http://c3lab.poliba.it/index.php/Westwood
 * Main references in literature:
 *
 * - S. Mascolo, C. Casetti, M. Gerla et al.
 *   "TCP Westwood: bandwidth estimation for TCP" Proc. ACM Mobicom 2001
 *
 * - A. Grieco, S. Mascolo
 *   "Performance evaluation of New Reno, Vegas, Westwood+ TCP" ACM Computer
 *   Comm. Review, 2004
 *
 * - A. Dell'Aera, L. Grieco, S. Mascolo.
 *   "Linux 2.4 Implementation of Westwood+ TCP with Rate-Halving:
 *   A Performance Evaluation Over the Internet" (ICC 2004), Paris, June 2004
 *
 * Westwood+ employs end-to-end bandwidth measurement to set cwnd and
 * ssthresh after packet loss. The probing phase is the same as in the
 * original Reno.
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/inet_diag.h>
#include <net/tcp.h>

/* TCP Westwood structure */
struct westwood {
        u32    bw_ns_est;        /* first bandwidth estimation..not too smoothed 8) */
        u32    bw_est;           /* bandwidth estimate */
        u32    rtt_win_sx;       /* here starts a new evaluation... */
        u32    bk;               /* bytes acked during the current RTT window */
        u32    snd_una;          /* used for evaluating the number of acked bytes */
        u32    cumul_ack;        /* bytes acked by the last ACK (at least one MSS) */
        u32    accounted;        /* bytes already credited via dupacks */
        u32    rtt;              /* last RTT sample, in jiffies */
        u32    rtt_min;          /* minimum observed RTT */
        u8     first_ack;        /* flag which infers that this is the first ack */
        u8     reset_rtt_min;    /* Reset RTT min to next RTT sample */
};

/* TCP Westwood functions and constants */
#define TCP_WESTWOOD_RTT_MIN    (HZ/20)    /* 50ms */
#define TCP_WESTWOOD_INIT_RTT   (20*HZ)    /* maybe too conservative?! */
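
/*
 * Illustrative note (assuming the common HZ=1000, not fixed by this file):
 * the evaluation-window floor is HZ/20 = 50 jiffies = 50ms, and the initial
 * RTT guess is 20*HZ = 20000 jiffies = 20s, deliberately huge so the first
 * genuine sample replaces it (see update_rtt_min() below).
 */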

/*
 * @tcp_westwood_init
 * This function initializes the fields used by TCP Westwood+. It is
 * called after the initial SYN, so the sequence numbers are correct,
 * but for new passive connections we have no information about RTTmin
 * yet, so we simply set it to TCP_WESTWOOD_INIT_RTT. This value was
 * deliberately chosen to be overly conservative so that it is sure to
 * be updated, in a consistent way, as soon as possible. That will
 * reasonably happen within the first RTT of the connection's lifetime.
 */
static void tcp_westwood_init(struct sock *sk)
{
        struct westwood *w = inet_csk_ca(sk);

        w->bk = 0;
        w->bw_ns_est = 0;
        w->bw_est = 0;
        w->accounted = 0;
        w->cumul_ack = 0;
        w->reset_rtt_min = 1;
        w->rtt_min = w->rtt = TCP_WESTWOOD_INIT_RTT;
        w->rtt_win_sx = tcp_time_stamp;
        w->snd_una = tcp_sk(sk)->snd_una;
        w->first_ack = 1;
}

/*
 * @westwood_do_filter
 * Low-pass filter. Implemented using constant coefficients.
 */
static inline u32 westwood_do_filter(u32 a, u32 b)
{
        return ((7 * a) + b) >> 3;
}
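
/*
 * Worked example (illustration, not part of the original source): with a
 * previous estimate of a = 800 bytes/jiffy and a new sample of
 * b = 1600 bytes/jiffy, (7*800 + 1600) >> 3 = 900, i.e. an EWMA with gain
 * 1/8 that moves one eighth of the way toward each new sample.
 */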

static void westwood_filter(struct westwood *w, u32 delta)
{
        /* If the filter is empty fill it with the first sample of bandwidth */
        if (w->bw_ns_est == 0 && w->bw_est == 0) {
                w->bw_ns_est = w->bk / delta;
                w->bw_est = w->bw_ns_est;
        } else {
                w->bw_ns_est = westwood_do_filter(w->bw_ns_est, w->bk / delta);
                w->bw_est = westwood_do_filter(w->bw_est, w->bw_ns_est);
        }
}
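
/*
 * Units note (illustration, assuming HZ=1000): the sample fed to the filter
 * is bk/delta, i.e. bytes acked per jiffy. Acking 64000 bytes over a
 * 50-jiffy window gives 1280 bytes/jiffy = 1.28 MB/s, roughly 10.2 Mbit/s.
 * The sample is smoothed twice: once into bw_ns_est and again into bw_est.
 */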

/*
 * @tcp_westwood_pkts_acked
 * Called after processing a group of packets, but all Westwood needs
 * is the last RTT sample.
 */
static void tcp_westwood_pkts_acked(struct sock *sk, u32 cnt, s32 rtt)
{
        struct westwood *w = inet_csk_ca(sk);

        if (rtt > 0)
                w->rtt = usecs_to_jiffies(rtt);
}

/*
 * @westwood_update_window
 * Updates the RTT evaluation window if the right moment to do so has
 * come, and in that case calls the filter to compute a new bandwidth
 * sample.
 */
static void westwood_update_window(struct sock *sk)
{
        struct westwood *w = inet_csk_ca(sk);
        s32 delta = tcp_time_stamp - w->rtt_win_sx;

        /* Initialize w->snd_una with the first acked sequence number in order
         * to fix the mismatch between tp->snd_una and w->snd_una for the first
         * bandwidth sample.
         */
        if (w->first_ack) {
                w->snd_una = tcp_sk(sk)->snd_una;
                w->first_ack = 0;
        }

        /*
         * See if an RTT-window has passed.
         * Be careful: if the RTT is less than 50ms we don't filter but we
         * keep 'building the sample'. This lower bound was chosen because
         * bandwidth estimates over very small time intervals are better
         * avoided. On a LAN we will therefore reasonably always have
         * right_bound = left_bound + WESTWOOD_RTT_MIN.
         */
        if (w->rtt && delta > max_t(u32, w->rtt, TCP_WESTWOOD_RTT_MIN)) {
                westwood_filter(w, delta);

                w->bk = 0;
                w->rtt_win_sx = tcp_time_stamp;
        }
}
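
/*
 * Example (illustration, assuming HZ=1000): with w->rtt = 120 jiffies a new
 * bandwidth sample is taken once more than 120 jiffies have elapsed since
 * rtt_win_sx; with w->rtt = 10 jiffies (a fast LAN) the 50-jiffy floor
 * TCP_WESTWOOD_RTT_MIN applies instead.
 */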

static inline void update_rtt_min(struct westwood *w)
{
        if (w->reset_rtt_min) {
                w->rtt_min = w->rtt;
                w->reset_rtt_min = 0;
        } else
                w->rtt_min = min(w->rtt, w->rtt_min);
}

/*
 * @westwood_fast_bw
 * Called on the fast path, i.e. when header prediction succeeds. In
 * that case the update is straightforward and needs no particular
 * care.
 */
static inline void westwood_fast_bw(struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);
        struct westwood *w = inet_csk_ca(sk);

        westwood_update_window(sk);

        w->bk += tp->snd_una - w->snd_una;
        w->snd_una = tp->snd_una;
        update_rtt_min(w);
}

/*
 * @westwood_acked_count
 * Evaluates cumul_ack, used for updating bk, handling delayed and
 * partial ACKs.
 */
static inline u32 westwood_acked_count(struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);
        struct westwood *w = inet_csk_ca(sk);

        w->cumul_ack = tp->snd_una - w->snd_una;

        /* If cumul_ack is 0 this is a dupack, since it is not moving
         * tp->snd_una.
         */
        if (!w->cumul_ack) {
                w->accounted += tp->mss_cache;
                w->cumul_ack = tp->mss_cache;
        }

        if (w->cumul_ack > tp->mss_cache) {
                /* Partial or delayed ack */
                if (w->accounted >= w->cumul_ack) {
                        w->accounted -= w->cumul_ack;
                        w->cumul_ack = tp->mss_cache;
                } else {
                        w->cumul_ack -= w->accounted;
                        w->accounted = 0;
                }
        }

        w->snd_una = tp->snd_una;

        return w->cumul_ack;
}
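
/*
 * Worked example (illustration, one MSS = 1448 bytes): three dupacks each
 * credit bk with one MSS and set accounted = 3*1448. If a later cumulative
 * ACK then advances snd_una by 5*1448 bytes, the else branch above credits
 * only the remaining 2*1448 bytes, so the dupacked data is not counted
 * twice.
 */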

/*
 * TCP Westwood
 * Here the limit is evaluated as bw_est * rtt_min (converted to packets
 * using mss_cache). The result is clamped to be >= 2, so this never
 * returns 0.
 */
static u32 tcp_westwood_bw_rttmin(const struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);
        const struct westwood *w = inet_csk_ca(sk);

        return max_t(u32, (w->bw_est * w->rtt_min) / tp->mss_cache, 2);
}
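
/*
 * Worked example (illustration, assuming HZ=1000 and MSS=1448): with
 * bw_est = 1280 bytes/jiffy and rtt_min = 100 jiffies the pipe holds
 * 1280 * 100 = 128000 bytes, i.e. 128000 / 1448 = 88 packets, which becomes
 * the new ssthresh (and cwnd once CWR completes).
 */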
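/*
 * @tcp_westwood_ack
 * Every incoming ACK updates the bandwidth sample: on the slow path via
 * westwood_acked_count() (which copes with delayed and partial ACKs), on
 * the fast path via westwood_fast_bw(). Hooked up as .in_ack_event below.
 */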
static void tcp_westwood_ack(struct sock *sk, u32 ack_flags)
{
        if (ack_flags & CA_ACK_SLOWPATH) {
                struct westwood *w = inet_csk_ca(sk);

                westwood_update_window(sk);
                w->bk += westwood_acked_count(sk);

                update_rtt_min(w);
                return;
        }

        westwood_fast_bw(sk);
}

static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct westwood *w = inet_csk_ca(sk);

        switch (event) {
        case CA_EVENT_COMPLETE_CWR:
                tp->snd_cwnd = tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
                break;
        case CA_EVENT_LOSS:
                tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
                /* Update RTT_min when next ack arrives */
                w->reset_rtt_min = 1;
                break;
        default:
                /* don't care */
                break;
        }
}
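
/*
 * As described in the references above, this is the heart of Westwood+:
 * on loss, ssthresh is set from the bandwidth estimate times the minimum
 * RTT rather than by halving cwnd, so losses not caused by congestion
 * (e.g. on lossy wireless links) do not needlessly collapse the rate.
 */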

/* Extract info for TCP socket info provided via netlink. */
static size_t tcp_westwood_info(struct sock *sk, u32 ext, int *attr,
                                union tcp_cc_info *info)
{
        const struct westwood *ca = inet_csk_ca(sk);

        if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
                info->vegas.tcpv_enabled = 1;
                info->vegas.tcpv_rttcnt = 0;
                info->vegas.tcpv_rtt = jiffies_to_usecs(ca->rtt);
                info->vegas.tcpv_minrtt = jiffies_to_usecs(ca->rtt_min);

                *attr = INET_DIAG_VEGASINFO;
                return sizeof(struct tcpvegas_info);
        }
        return 0;
}
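
/*
 * Note: Westwood has no diag struct of its own; it reuses the Vegas one
 * (struct tcpvegas_info) to export its RTT and RTT-min samples, converted
 * from jiffies to microseconds.
 */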

static struct tcp_congestion_ops tcp_westwood __read_mostly = {
        .init           = tcp_westwood_init,
        .ssthresh       = tcp_reno_ssthresh,
        .cong_avoid     = tcp_reno_cong_avoid,
        .cwnd_event     = tcp_westwood_event,
        .in_ack_event   = tcp_westwood_ack,
        .get_info       = tcp_westwood_info,
        .pkts_acked     = tcp_westwood_pkts_acked,

        .owner          = THIS_MODULE,
        .name           = "westwood"
};
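
/*
 * Usage sketch (illustration): once the module is loaded, the algorithm
 * can be selected system-wide with
 *   sysctl -w net.ipv4.tcp_congestion_control=westwood
 * or per-socket via
 *   setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "westwood", 8);
 */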

static int __init tcp_westwood_register(void)
{
        BUILD_BUG_ON(sizeof(struct westwood) > ICSK_CA_PRIV_SIZE);
        return tcp_register_congestion_control(&tcp_westwood);
}

static void __exit tcp_westwood_unregister(void)
{
        tcp_unregister_congestion_control(&tcp_westwood);
}

module_init(tcp_westwood_register);
module_exit(tcp_westwood_unregister);

MODULE_AUTHOR("Stephen Hemminger, Angelo Dell'Aera");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP Westwood+");