// SPDX-License-Identifier: GPL-2.0

#include "bpf_tracing_net.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

#define USEC_PER_SEC 1000000UL

#define min(a, b) ((a) < (b) ? (a) : (b))

/* Segments that have left the network: SACKed plus marked lost. */
static unsigned int tcp_left_out(const struct tcp_sock *tp)
{
	return tp->sacked_out + tp->lost_out;
}

/* Segments currently in flight: sent, minus left-out, plus retransmits. */
static unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
	return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}

SEC("struct_ops")
void BPF_PROG(write_sk_pacing_init, struct sock *sk)
{
#ifdef ENABLE_ATOMICS_TESTS
	/* Ask the stack to pace this socket; the CAS only flips the
	 * status if no pacing has been requested yet.
	 */
	__sync_bool_compare_and_swap(&sk->sk_pacing_status, SK_PACING_NONE,
				     SK_PACING_NEEDED);
#else
	sk->sk_pacing_status = SK_PACING_NEEDED;
#endif
}

SEC("struct_ops")
void BPF_PROG(write_sk_pacing_cong_control, struct sock *sk,
	      const struct rate_sample *rs)
{
	struct tcp_sock *tp = tcp_sk(sk);
	/* Pace at roughly cwnd * mss / srtt bytes per second. srtt_us
	 * stores the smoothed RTT left-shifted by 3, so the numerator
	 * is shifted by 3 to match; before the first RTT sample, fall
	 * back to 1 us (stored as 1U << 3) to avoid dividing by zero.
	 */
	unsigned long rate =
		((tp->snd_cwnd * tp->mss_cache * USEC_PER_SEC) << 3) /
		(tp->srtt_us ?: 1U << 3);
	sk->sk_pacing_rate = min(rate, sk->sk_max_pacing_rate);
	/* Mark the flow application-limited; never write 0, which would
	 * mean "not app-limited".
	 */
	tp->app_limited = (tp->delivered + tcp_packets_in_flight(tp)) ?: 1;
}

SEC("struct_ops")
__u32 BPF_PROG(write_sk_pacing_ssthresh, struct sock *sk)
{
	return tcp_sk(sk)->snd_ssthresh;
}

SEC("struct_ops")
__u32 BPF_PROG(write_sk_pacing_undo_cwnd, struct sock *sk)
{
	return tcp_sk(sk)->snd_cwnd;
}

SEC(".struct_ops")
struct tcp_congestion_ops write_sk_pacing = {
	.init = (void *)write_sk_pacing_init,
	.cong_control = (void *)write_sk_pacing_cong_control,
	.ssthresh = (void *)write_sk_pacing_ssthresh,
	.undo_cwnd = (void *)write_sk_pacing_undo_cwnd,
	.name = "bpf_w_sk_pacing",
};
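
/*
 * Usage sketch (an assumption for illustration, not part of the selftest):
 * a minimal libbpf loader that registers the struct_ops map above as a TCP
 * congestion control. The skeleton header name "tcp_ca_write_sk_pacing.skel.h"
 * and the generated symbol names are hypothetical and must match however
 * this object is actually built.
 */
#include <bpf/libbpf.h>
#include "tcp_ca_write_sk_pacing.skel.h" /* hypothetical skeleton header */

int main(void)
{
	struct tcp_ca_write_sk_pacing *skel;
	struct bpf_link *link;

	skel = tcp_ca_write_sk_pacing__open_and_load();
	if (!skel)
		return 1;

	/* Registers .name = "bpf_w_sk_pacing" with the TCP CC framework. */
	link = bpf_map__attach_struct_ops(skel->maps.write_sk_pacing);
	if (!link) {
		tcp_ca_write_sk_pacing__destroy(skel);
		return 1;
	}

	/* ... keep the link alive while sockets use the CC ... */

	bpf_link__destroy(link);
	tcp_ca_write_sk_pacing__destroy(skel);
	return 0;
}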
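
/*
 * Socket-side sketch: once the struct_ops link is attached, a socket
 * opts in to the new congestion control by the .name it registered.
 * make_paced_socket() is a hypothetical helper for illustration.
 */
#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

static int make_paced_socket(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;
	/* Must match .name in the struct_ops map above. */
	if (setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "bpf_w_sk_pacing",
		       strlen("bpf_w_sk_pacing"))) {
		close(fd);
		return -1;
	}
	return fd;
}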