// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */

/* WARNING: This implementation is not necessarily the same
 * as tcp_dctcp.c. The purpose is mainly to test
 * the kernel BPF logic.
 */

#include <stddef.h>
#include <linux/bpf.h>
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/tcp.h>
#include <errno.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include "bpf_tcp_helpers.h"

char _license[] SEC("license") = "GPL";

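/* Knobs and result slots shared with the userspace test (presumably
 * prog_tests/bpf_tcp_ca.c): "fallback" is a read-only CC name patched
 * in before load, while cc_res, tcp_cdg_res, stg_result and ebusy_cnt
 * are written by the programs below and read back for verification.
 */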
volatile const char fallback[TCP_CA_NAME_MAX];
const char bpf_dctcp[] = "bpf_dctcp";
const char tcp_cdg[] = "cdg";
char cc_res[TCP_CA_NAME_MAX];
int tcp_cdg_res = 0;
int stg_result = 0;
int ebusy_cnt = 0;

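/* Socket storage: the userspace test is expected to store a value here
 * for the socket under test; dctcp_init() copies it into stg_result
 * and deletes the entry.
 */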
struct {
	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, int);
} sk_stg_map SEC(".maps");

#define DCTCP_MAX_ALPHA	1024U

struct dctcp {
	__u32 old_delivered;
	__u32 old_delivered_ce;
	__u32 prior_rcv_nxt;
	__u32 dctcp_alpha;
	__u32 next_seq;
	__u32 ce_state;
	__u32 loss_cwnd;
};

static unsigned int dctcp_shift_g = 4; /* g = 1/2^4 */
static unsigned int dctcp_alpha_on_init = DCTCP_MAX_ALPHA;

static __always_inline void dctcp_reset(const struct tcp_sock *tp,
					struct dctcp *ca)
{
	ca->next_seq = tp->snd_nxt;

	ca->old_delivered = tp->delivered;
	ca->old_delivered_ce = tp->delivered_ce;
}

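/* Besides the normal DCTCP initialization, dctcp_init() also exercises
 * bpf_setsockopt(TCP_CONGESTION) from inside a CC callback: if ECN was
 * not negotiated and the test provided a fallback CC name, the socket
 * is bounced between the fallback CC, this CC and "cdg", recording the
 * -EBUSY/-ENOTSUPP results for the test to check.
 */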
SEC("struct_ops/dctcp_init")
void BPF_PROG(dctcp_init, struct sock *sk)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct dctcp *ca = inet_csk_ca(sk);
	int *stg;

	if (!(tp->ecn_flags & TCP_ECN_OK) && fallback[0]) {
		/* Switch to fallback */
		if (bpf_setsockopt(sk, SOL_TCP, TCP_CONGESTION,
				   (void *)fallback, sizeof(fallback)) == -EBUSY)
			ebusy_cnt++;

		/* Switch back to myself; the recursive dctcp_init() call
		 * will get -EBUSY from all bpf_setsockopt(TCP_CONGESTION)
		 * calls, except the last "cdg" one.
		 */
		if (bpf_setsockopt(sk, SOL_TCP, TCP_CONGESTION,
				   (void *)bpf_dctcp, sizeof(bpf_dctcp)) == -EBUSY)
			ebusy_cnt++;

		/* Switch back to fallback */
		if (bpf_setsockopt(sk, SOL_TCP, TCP_CONGESTION,
				   (void *)fallback, sizeof(fallback)) == -EBUSY)
			ebusy_cnt++;

		/* Expecting -ENOTSUPP for tcp_cdg_res */
		tcp_cdg_res = bpf_setsockopt(sk, SOL_TCP, TCP_CONGESTION,
					     (void *)tcp_cdg, sizeof(tcp_cdg));
		bpf_getsockopt(sk, SOL_TCP, TCP_CONGESTION,
			       (void *)cc_res, sizeof(cc_res));
		return;
	}

	ca->prior_rcv_nxt = tp->rcv_nxt;
	ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA);
	ca->loss_cwnd = 0;
	ca->ce_state = 0;

	stg = bpf_sk_storage_get(&sk_stg_map, (void *)tp, NULL, 0);
	if (stg) {
		stg_result = *stg;
		bpf_sk_storage_delete(&sk_stg_map, (void *)tp);
	}
	dctcp_reset(tp, ca);
}

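/* Cut cwnd by a factor of (1 - alpha/2) instead of Reno's one half.
 * alpha is kept in fixed point with DCTCP_MAX_ALPHA (1024 == 2^10)
 * representing 1.0, so cwnd * alpha / 2048 becomes
 * (cwnd * alpha) >> 11 below.
 */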
SEC("struct_ops/dctcp_ssthresh")
__u32 BPF_PROG(dctcp_ssthresh, struct sock *sk)
{
	struct dctcp *ca = inet_csk_ca(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	ca->loss_cwnd = tp->snd_cwnd;
	return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->dctcp_alpha) >> 11U), 2U);
}

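/* Once per RTT (when snd_una passes next_seq), update the EWMA
 * alpha = (1 - g) * alpha + g * F, where F is the fraction of
 * delivered packets that were CE-marked during that RTT and
 * g = 1/2^dctcp_shift_g.  In the fixed-point form used below,
 * g * F * 1024 is (delivered_ce << (10 - dctcp_shift_g)) / delivered.
 */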
SEC("struct_ops/dctcp_update_alpha")
void BPF_PROG(dctcp_update_alpha, struct sock *sk, __u32 flags)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	struct dctcp *ca = inet_csk_ca(sk);

	/* Expired RTT */
	if (!before(tp->snd_una, ca->next_seq)) {
		__u32 delivered_ce = tp->delivered_ce - ca->old_delivered_ce;
		__u32 alpha = ca->dctcp_alpha;

		/* alpha = (1 - g) * alpha + g * F */

		alpha -= min_not_zero(alpha, alpha >> dctcp_shift_g);
		if (delivered_ce) {
			__u32 delivered = tp->delivered - ca->old_delivered;

			/* If dctcp_shift_g == 1, a 32bit value would overflow
			 * after 8 M packets.
			 */
			delivered_ce <<= (10 - dctcp_shift_g);
			delivered_ce /= max(1U, delivered);

			alpha = min(alpha + delivered_ce, DCTCP_MAX_ALPHA);
		}
		ca->dctcp_alpha = alpha;
		dctcp_reset(tp, ca);
	}
}

static __always_inline void dctcp_react_to_loss(struct sock *sk)
{
	struct dctcp *ca = inet_csk_ca(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	ca->loss_cwnd = tp->snd_cwnd;
	tp->snd_ssthresh = max(tp->snd_cwnd >> 1U, 2U);
}

SEC("struct_ops/dctcp_state")
void BPF_PROG(dctcp_state, struct sock *sk, __u8 new_state)
{
	if (new_state == TCP_CA_Recovery &&
	    new_state != BPF_CORE_READ_BITFIELD(inet_csk(sk), icsk_ca_state))
		dctcp_react_to_loss(sk);
	/* We handle RTO in dctcp_cwnd_event to ensure that we perform only
	 * one loss-adjustment per RTT.
	 */
}

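/* While the last received packet was CE-marked, keep TCP_ECN_DEMAND_CWR
 * set so outgoing ACKs carry ECE until the sender responds with CWR;
 * clear it again once a non-CE packet arrives.
 */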
static __always_inline void dctcp_ece_ack_cwr(struct sock *sk, __u32 ce_state)
{
	struct tcp_sock *tp = tcp_sk(sk);

	if (ce_state == 1)
		tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
	else
		tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
}

/* Minimal DCTCP CE state machine:
 *
 * S:	0 <- last pkt was non-CE
 *	1 <- last pkt was CE
 */
static __always_inline
void dctcp_ece_ack_update(struct sock *sk, enum tcp_ca_event evt,
			  __u32 *prior_rcv_nxt, __u32 *ce_state)
{
	__u32 new_ce_state = (evt == CA_EVENT_ECN_IS_CE) ? 1 : 0;

	if (*ce_state != new_ce_state) {
		/* CE state has changed, force an immediate ACK to
		 * reflect the new CE state. If an ACK was delayed,
		 * send that first to reflect the prior CE state.
		 */
		if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER) {
			dctcp_ece_ack_cwr(sk, *ce_state);
			bpf_tcp_send_ack(sk, *prior_rcv_nxt);
		}
		inet_csk(sk)->icsk_ack.pending |= ICSK_ACK_NOW;
	}
	*prior_rcv_nxt = tcp_sk(sk)->rcv_nxt;
	*ce_state = new_ce_state;
	dctcp_ece_ack_cwr(sk, new_ce_state);
}

SEC("struct_ops/dctcp_cwnd_event")
void BPF_PROG(dctcp_cwnd_event, struct sock *sk, enum tcp_ca_event ev)
{
	struct dctcp *ca = inet_csk_ca(sk);

	switch (ev) {
	case CA_EVENT_ECN_IS_CE:
	case CA_EVENT_ECN_NO_CE:
		dctcp_ece_ack_update(sk, ev, &ca->prior_rcv_nxt, &ca->ce_state);
		break;
	case CA_EVENT_LOSS:
		dctcp_react_to_loss(sk);
		break;
	default:
		/* Don't care for the rest. */
		break;
	}
}

SEC("struct_ops/dctcp_cwnd_undo")
__u32 BPF_PROG(dctcp_cwnd_undo, struct sock *sk)
{
	const struct dctcp *ca = inet_csk_ca(sk);

	return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
}

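/* Reno-style congestion avoidance is not re-implemented in BPF here.
 * The __ksym extern below lets this struct_ops program call the
 * kernel's own tcp_reno_cong_avoid() directly, assuming a kernel that
 * allows that kfunc call from tcp_congestion_ops programs.
 */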
extern void tcp_reno_cong_avoid(struct sock *sk, __u32 ack, __u32 acked) __ksym;

SEC("struct_ops/dctcp_reno_cong_avoid")
void BPF_PROG(dctcp_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
{
	tcp_reno_cong_avoid(sk, ack, acked);
}

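/* Two congestion-control algorithms are registered from this object:
 * "bpf_dctcp_nouse" only wires up .init/.set_state and is presumably
 * registered-but-never-selected to exercise that path, while
 * "bpf_dctcp" is the one the test attaches and selects via
 * setsockopt(TCP_CONGESTION).
 */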
SEC(".struct_ops")
struct tcp_congestion_ops dctcp_nouse = {
	.init = (void *)dctcp_init,
	.set_state = (void *)dctcp_state,
	.flags = TCP_CONG_NEEDS_ECN,
	.name = "bpf_dctcp_nouse",
};

SEC(".struct_ops")
struct tcp_congestion_ops dctcp = {
	.init = (void *)dctcp_init,
	.in_ack_event = (void *)dctcp_update_alpha,
	.cwnd_event = (void *)dctcp_cwnd_event,
	.ssthresh = (void *)dctcp_ssthresh,
	.cong_avoid = (void *)dctcp_cong_avoid,
	.undo_cwnd = (void *)dctcp_cwnd_undo,
	.set_state = (void *)dctcp_state,
	.flags = TCP_CONG_NEEDS_ECN,
	.name = "bpf_dctcp",
};