// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/filter.h>
#include <net/tcp.h>
#include <net/bpf_sk_storage.h>

/* Forward declaration; registered with register_bpf_struct_ops() in
 * bpf_tcp_ca_kfunc_init() below.
 */
static struct bpf_struct_ops bpf_tcp_congestion_ops;

static const struct btf_type *tcp_sock_type;
static u32 tcp_sock_id, sock_id;
static const struct btf_type *tcp_congestion_ops_type;

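/* Resolve the vmlinux BTF type ids used by the verifier callbacks below:
 * "sock" and "tcp_sock" for pointer promotion, and "tcp_congestion_ops"
 * for mapping a prog's attach point back to a struct member.
 */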
static int bpf_tcp_ca_init(struct btf *btf)
{
	s32 type_id;

	type_id = btf_find_by_name_kind(btf, "sock", BTF_KIND_STRUCT);
	if (type_id < 0)
		return -EINVAL;
	sock_id = type_id;

	type_id = btf_find_by_name_kind(btf, "tcp_sock", BTF_KIND_STRUCT);
	if (type_id < 0)
		return -EINVAL;
	tcp_sock_id = type_id;
	tcp_sock_type = btf_type_by_id(btf, tcp_sock_id);

	type_id = btf_find_by_name_kind(btf, "tcp_congestion_ops", BTF_KIND_STRUCT);
	if (type_id < 0)
		return -EINVAL;
	tcp_congestion_ops_type = btf_type_by_id(btf, type_id);

	return 0;
}

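/* ctx access check for a bpf-tcp-cc prog, whose ctx is the array of the
 * attached op's arguments.  An argument typed "struct sock *" is promoted
 * to "struct tcp_sock *" so the prog can read tcp_sock fields directly
 * through the same pointer.
 */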
static bool bpf_tcp_ca_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	if (!bpf_tracing_btf_ctx_access(off, size, type, prog, info))
		return false;

	if (base_type(info->reg_type) == PTR_TO_BTF_ID &&
	    !bpf_type_has_unsafe_modifiers(info->reg_type) &&
	    info->btf_id == sock_id)
		/* promote it to tcp_sock */
		info->btf_id = tcp_sock_id;

	return true;
}

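/* Called for writes through a PTR_TO_BTF_ID (reads go through the generic
 * btf_struct_access()).  Only an explicit allow-list of sock,
 * inet_connection_sock and tcp_sock members may be written; anything else
 * is rejected with -EACCES.
 */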
static int bpf_tcp_ca_btf_struct_access(struct bpf_verifier_log *log,
					const struct bpf_reg_state *reg,
					int off, int size)
{
	const struct btf_type *t;
	size_t end;

	t = btf_type_by_id(reg->btf, reg->btf_id);
	if (t != tcp_sock_type) {
		bpf_log(log, "only read is supported\n");
		return -EACCES;
	}

	switch (off) {
	case offsetof(struct sock, sk_pacing_rate):
		end = offsetofend(struct sock, sk_pacing_rate);
		break;
	case offsetof(struct sock, sk_pacing_status):
		end = offsetofend(struct sock, sk_pacing_status);
		break;
	case bpf_ctx_range(struct inet_connection_sock, icsk_ca_priv):
		end = offsetofend(struct inet_connection_sock, icsk_ca_priv);
		break;
	case offsetof(struct inet_connection_sock, icsk_ack.pending):
		end = offsetofend(struct inet_connection_sock,
				  icsk_ack.pending);
		break;
	case offsetof(struct tcp_sock, snd_cwnd):
		end = offsetofend(struct tcp_sock, snd_cwnd);
		break;
	case offsetof(struct tcp_sock, snd_cwnd_cnt):
		end = offsetofend(struct tcp_sock, snd_cwnd_cnt);
		break;
	case offsetof(struct tcp_sock, snd_cwnd_stamp):
		end = offsetofend(struct tcp_sock, snd_cwnd_stamp);
		break;
	case offsetof(struct tcp_sock, snd_ssthresh):
		end = offsetofend(struct tcp_sock, snd_ssthresh);
		break;
	case offsetof(struct tcp_sock, ecn_flags):
		end = offsetofend(struct tcp_sock, ecn_flags);
		break;
	case offsetof(struct tcp_sock, app_limited):
		end = offsetofend(struct tcp_sock, app_limited);
		break;
	default:
		bpf_log(log, "no write support to tcp_sock at off %d\n", off);
		return -EACCES;
	}

	if (off + size > end) {
		bpf_log(log,
			"write access at off %d with size %d beyond the member of tcp_sock ended at %zu\n",
			off, size, end);
		return -EACCES;
	}

	return 0;
}

BPF_CALL_2(bpf_tcp_send_ack, struct tcp_sock *, tp, u32, rcv_nxt)
{
	/* bpf_tcp_ca prog cannot have NULL tp */
	__tcp_send_ack((struct sock *)tp, rcv_nxt);
	return 0;
}

static const struct bpf_func_proto bpf_tcp_send_ack_proto = {
	.func = bpf_tcp_send_ack,
	.gpl_only = false,
	/* In case we want to report error later */
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_PTR_TO_BTF_ID,
	.arg1_btf_id = &tcp_sock_id,
	.arg2_type = ARG_ANYTHING,
};

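/* Map a prog back to the tcp_congestion_ops member it implements: a
 * struct_ops prog's expected_attach_type is the member index, which is
 * converted to a byte offset via the member's BTF bit offset.
 */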
static u32 prog_ops_moff(const struct bpf_prog *prog)
{
	const struct btf_member *m;
	const struct btf_type *t;
	u32 midx;

	midx = prog->expected_attach_type;
	t = tcp_congestion_ops_type;
	m = &btf_type_member(t)[midx];

	return __btf_member_bit_offset(t, m) / 8;
}

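/* Helpers available to bpf-tcp-cc progs.  get/setsockopt are withheld
 * from the release() hook (see the comments in the function body);
 * everything else falls back to the base helper set.
 */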
static const struct bpf_func_proto *
bpf_tcp_ca_get_func_proto(enum bpf_func_id func_id,
			  const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_tcp_send_ack:
		return &bpf_tcp_send_ack_proto;
	case BPF_FUNC_sk_storage_get:
		return &bpf_sk_storage_get_proto;
	case BPF_FUNC_sk_storage_delete:
		return &bpf_sk_storage_delete_proto;
	case BPF_FUNC_setsockopt:
		/* Does not allow release() to call setsockopt.
		 * release() is called when the current bpf-tcp-cc
		 * is retiring.  It is not allowed to call
		 * setsockopt() to make further changes which
		 * may potentially allocate new resources.
		 */
		if (prog_ops_moff(prog) !=
		    offsetof(struct tcp_congestion_ops, release))
			return &bpf_sk_setsockopt_proto;
		return NULL;
	case BPF_FUNC_getsockopt:
		/* Since get/setsockopt is usually expected to
		 * be available together, disable getsockopt for
		 * release also to avoid usage surprise.
		 * The bpf-tcp-cc already has a more powerful way
		 * to read tcp_sock from the PTR_TO_BTF_ID.
		 */
		if (prog_ops_moff(prog) !=
		    offsetof(struct tcp_congestion_ops, release))
			return &bpf_sk_getsockopt_proto;
		return NULL;
	case BPF_FUNC_ktime_get_coarse_ns:
		return &bpf_ktime_get_coarse_ns_proto;
	default:
		return bpf_base_func_proto(func_id, prog);
	}
}

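/* Kernel functions a bpf-tcp-cc prog may call directly as kfuncs, so a
 * prog can delegate to the stock Reno/slow-start logic instead of
 * reimplementing it.
 */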
BTF_KFUNCS_START(bpf_tcp_ca_check_kfunc_ids)
BTF_ID_FLAGS(func, tcp_reno_ssthresh)
BTF_ID_FLAGS(func, tcp_reno_cong_avoid)
BTF_ID_FLAGS(func, tcp_reno_undo_cwnd)
BTF_ID_FLAGS(func, tcp_slow_start)
BTF_ID_FLAGS(func, tcp_cong_avoid_ai)
BTF_KFUNCS_END(bpf_tcp_ca_check_kfunc_ids)

static const struct btf_kfunc_id_set bpf_tcp_ca_kfunc_set = {
	.owner = THIS_MODULE,
	.set = &bpf_tcp_ca_check_kfunc_ids,
};

static const struct bpf_verifier_ops bpf_tcp_ca_verifier_ops = {
	.get_func_proto = bpf_tcp_ca_get_func_proto,
	.is_valid_access = bpf_tcp_ca_is_valid_access,
	.btf_struct_access = bpf_tcp_ca_btf_struct_access,
};

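/* Copy the non-func members (flags, name) from the user-supplied image
 * into the kernel's tcp_congestion_ops.  Returning 1 tells the generic
 * struct_ops code that the member was handled here; 0 leaves it to the
 * default handling.
 */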
static int bpf_tcp_ca_init_member(const struct btf_type *t,
				  const struct btf_member *member,
				  void *kdata, const void *udata)
{
	const struct tcp_congestion_ops *utcp_ca;
	struct tcp_congestion_ops *tcp_ca;
	u32 moff;

	utcp_ca = (const struct tcp_congestion_ops *)udata;
	tcp_ca = (struct tcp_congestion_ops *)kdata;

	moff = __btf_member_bit_offset(t, member) / 8;
	switch (moff) {
	case offsetof(struct tcp_congestion_ops, flags):
		if (utcp_ca->flags & ~TCP_CONG_MASK)
			return -EINVAL;
		tcp_ca->flags = utcp_ca->flags;
		return 1;
	case offsetof(struct tcp_congestion_ops, name):
		if (bpf_obj_name_cpy(tcp_ca->name, utcp_ca->name,
				     sizeof(tcp_ca->name)) <= 0)
			return -EINVAL;
		return 1;
	}

	return 0;
}

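/* struct_ops lifecycle: reg/unreg run when the struct_ops map is
 * attached/detached, update replaces a live bpf-tcp-cc in place, and
 * validate applies the usual tcp_congestion_ops sanity checks before
 * registration.
 */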
static int bpf_tcp_ca_reg(void *kdata, struct bpf_link *link)
{
	return tcp_register_congestion_control(kdata);
}

static void bpf_tcp_ca_unreg(void *kdata, struct bpf_link *link)
{
	tcp_unregister_congestion_control(kdata);
}

static int bpf_tcp_ca_update(void *kdata, void *old_kdata, struct bpf_link *link)
{
	return tcp_update_congestion_control(kdata, old_kdata);
}

static int bpf_tcp_ca_validate(void *kdata)
{
	return tcp_validate_congestion_control(kdata);
}

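/* No-op stubs.  Their bodies are never executed; they exist to give the
 * CFI machinery a correctly-typed target for each tcp_congestion_ops
 * member (see .cfi_stubs below).
 */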
static u32 bpf_tcp_ca_ssthresh(struct sock *sk)
{
	return 0;
}

static void bpf_tcp_ca_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
}

static void bpf_tcp_ca_set_state(struct sock *sk, u8 new_state)
{
}

static void bpf_tcp_ca_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
{
}

static void bpf_tcp_ca_in_ack_event(struct sock *sk, u32 flags)
{
}

static void bpf_tcp_ca_pkts_acked(struct sock *sk, const struct ack_sample *sample)
{
}

static u32 bpf_tcp_ca_min_tso_segs(struct sock *sk)
{
	return 0;
}

static void bpf_tcp_ca_cong_control(struct sock *sk, u32 ack, int flag,
				    const struct rate_sample *rs)
{
}

static u32 bpf_tcp_ca_undo_cwnd(struct sock *sk)
{
	return 0;
}

static u32 bpf_tcp_ca_sndbuf_expand(struct sock *sk)
{
	return 0;
}

static void __bpf_tcp_ca_init(struct sock *sk)
{
}

static void __bpf_tcp_ca_release(struct sock *sk)
{
}

static struct tcp_congestion_ops __bpf_ops_tcp_congestion_ops = {
	.ssthresh = bpf_tcp_ca_ssthresh,
	.cong_avoid = bpf_tcp_ca_cong_avoid,
	.set_state = bpf_tcp_ca_set_state,
	.cwnd_event = bpf_tcp_ca_cwnd_event,
	.in_ack_event = bpf_tcp_ca_in_ack_event,
	.pkts_acked = bpf_tcp_ca_pkts_acked,
	.min_tso_segs = bpf_tcp_ca_min_tso_segs,
	.cong_control = bpf_tcp_ca_cong_control,
	.undo_cwnd = bpf_tcp_ca_undo_cwnd,
	.sndbuf_expand = bpf_tcp_ca_sndbuf_expand,

	.init = __bpf_tcp_ca_init,
	.release = __bpf_tcp_ca_release,
};

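/* The struct_ops record itself: wires the verifier callbacks and the
 * lifecycle hooks above to the "tcp_congestion_ops" BTF type.
 */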
static struct bpf_struct_ops bpf_tcp_congestion_ops = {
	.verifier_ops = &bpf_tcp_ca_verifier_ops,
	.reg = bpf_tcp_ca_reg,
	.unreg = bpf_tcp_ca_unreg,
	.update = bpf_tcp_ca_update,
	.init_member = bpf_tcp_ca_init_member,
	.init = bpf_tcp_ca_init,
	.validate = bpf_tcp_ca_validate,
	.name = "tcp_congestion_ops",
	.cfi_stubs = &__bpf_ops_tcp_congestion_ops,
	.owner = THIS_MODULE,
};

static int __init bpf_tcp_ca_kfunc_init(void)
{
	int ret;

	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &bpf_tcp_ca_kfunc_set);
	ret = ret ?: register_bpf_struct_ops(&bpf_tcp_congestion_ops, tcp_congestion_ops);

	return ret;
}
late_initcall(bpf_tcp_ca_kfunc_init);
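
/* Example (illustrative, not part of this file): a minimal BPF-side
 * congestion control that this struct_ops support would load.  It mirrors
 * Reno by calling the kfuncs registered above.  The prog/map names are
 * made up, and the SEC()/vmlinux.h usage follows the usual libbpf
 * conventions for struct_ops programs rather than anything this file
 * defines.
 *
 *	// SPDX-License-Identifier: GPL-2.0
 *	#include "vmlinux.h"
 *	#include <bpf/bpf_helpers.h>
 *	#include <bpf/bpf_tracing.h>
 *
 *	char _license[] SEC("license") = "GPL";
 *
 *	extern __u32 tcp_reno_ssthresh(struct sock *sk) __ksym;
 *	extern __u32 tcp_reno_undo_cwnd(struct sock *sk) __ksym;
 *	extern void tcp_reno_cong_avoid(struct sock *sk, __u32 ack,
 *					__u32 acked) __ksym;
 *
 *	SEC("struct_ops/bpf_reno_ssthresh")
 *	__u32 BPF_PROG(bpf_reno_ssthresh, struct sock *sk)
 *	{
 *		return tcp_reno_ssthresh(sk);
 *	}
 *
 *	SEC("struct_ops/bpf_reno_cong_avoid")
 *	void BPF_PROG(bpf_reno_cong_avoid, struct sock *sk, __u32 ack,
 *		      __u32 acked)
 *	{
 *		tcp_reno_cong_avoid(sk, ack, acked);
 *	}
 *
 *	SEC("struct_ops/bpf_reno_undo_cwnd")
 *	__u32 BPF_PROG(bpf_reno_undo_cwnd, struct sock *sk)
 *	{
 *		return tcp_reno_undo_cwnd(sk);
 *	}
 *
 *	// ssthresh, undo_cwnd and one of cong_avoid/cong_control are the
 *	// members tcp_validate_congestion_control() requires.
 *	SEC(".struct_ops")
 *	struct tcp_congestion_ops bpf_reno = {
 *		.ssthresh = (void *)bpf_reno_ssthresh,
 *		.cong_avoid = (void *)bpf_reno_cong_avoid,
 *		.undo_cwnd = (void *)bpf_reno_undo_cwnd,
 *		.name = "bpf_reno",
 *	};
 */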