// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */

#include <linux/types.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/filter.h>
#include <net/tcp.h>
#include <net/bpf_sk_storage.h>

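/* tcp_congestion_ops members that a bpf CC may leave NULL
 * (see the compulsory-func-ptr check in bpf_tcp_ca_init_member()).
 */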
static u32 optional_ops[] = {
	offsetof(struct tcp_congestion_ops, init),
	offsetof(struct tcp_congestion_ops, release),
	offsetof(struct tcp_congestion_ops, set_state),
	offsetof(struct tcp_congestion_ops, cwnd_event),
	offsetof(struct tcp_congestion_ops, in_ack_event),
	offsetof(struct tcp_congestion_ops, pkts_acked),
	offsetof(struct tcp_congestion_ops, min_tso_segs),
	offsetof(struct tcp_congestion_ops, sndbuf_expand),
	offsetof(struct tcp_congestion_ops, cong_control),
};

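/* Members that cannot be implemented from BPF at all; rejected in
 * bpf_tcp_ca_check_member().
 */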
static u32 unsupported_ops[] = {
	offsetof(struct tcp_congestion_ops, get_info),
};

static const struct btf_type *tcp_sock_type;
static u32 tcp_sock_id, sock_id;

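/* Resolve and cache the BTF ids of "struct sock" and "struct tcp_sock"
 * once at struct_ops registration time; they drive the sock to tcp_sock
 * pointer promotion and the tcp_sock write checks below.
 */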
static int bpf_tcp_ca_init(struct btf *btf)
{
	s32 type_id;

	type_id = btf_find_by_name_kind(btf, "sock", BTF_KIND_STRUCT);
	if (type_id < 0)
		return -EINVAL;
	sock_id = type_id;

	type_id = btf_find_by_name_kind(btf, "tcp_sock", BTF_KIND_STRUCT);
	if (type_id < 0)
		return -EINVAL;
	tcp_sock_id = type_id;
	tcp_sock_type = btf_type_by_id(btf, tcp_sock_id);

	return 0;
}

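/* Classify a tcp_congestion_ops member by its byte offset. */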
static bool is_optional(u32 member_offset)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(optional_ops); i++) {
		if (member_offset == optional_ops[i])
			return true;
	}

	return false;
}

static bool is_unsupported(u32 member_offset)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(unsupported_ops); i++) {
		if (member_offset == unsupported_ops[i])
			return true;
	}

	return false;
}

extern struct btf *btf_vmlinux;

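/* Context accesses are read-only.  When the verifier sees a context arg
 * typed as "struct sock", promote it to "struct tcp_sock" so the prog
 * can read tcp_sock fields directly.
 */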
static bool bpf_tcp_ca_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	if (!btf_ctx_access(off, size, type, prog, info))
		return false;

	if (info->reg_type == PTR_TO_BTF_ID && info->btf_id == sock_id)
		/* promote it to tcp_sock */
		info->btf_id = tcp_sock_id;

	return true;
}

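/* Reads go through the generic btf_struct_access().  Writes are only
 * allowed to a small allowlist of tcp_sock/inet_connection_sock fields,
 * and a write must not go past the end of the member it starts in.
 */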
static int bpf_tcp_ca_btf_struct_access(struct bpf_verifier_log *log,
					const struct btf *btf,
					const struct btf_type *t, int off,
					int size, enum bpf_access_type atype,
					u32 *next_btf_id)
{
	size_t end;

	if (atype == BPF_READ)
		return btf_struct_access(log, btf, t, off, size, atype, next_btf_id);

	if (t != tcp_sock_type) {
		bpf_log(log, "only read is supported\n");
		return -EACCES;
	}

	switch (off) {
	case bpf_ctx_range(struct inet_connection_sock, icsk_ca_priv):
		end = offsetofend(struct inet_connection_sock, icsk_ca_priv);
		break;
	case offsetof(struct inet_connection_sock, icsk_ack.pending):
		end = offsetofend(struct inet_connection_sock,
				  icsk_ack.pending);
		break;
	case offsetof(struct tcp_sock, snd_cwnd):
		end = offsetofend(struct tcp_sock, snd_cwnd);
		break;
	case offsetof(struct tcp_sock, snd_cwnd_cnt):
		end = offsetofend(struct tcp_sock, snd_cwnd_cnt);
		break;
	case offsetof(struct tcp_sock, snd_ssthresh):
		end = offsetofend(struct tcp_sock, snd_ssthresh);
		break;
	case offsetof(struct tcp_sock, ecn_flags):
		end = offsetofend(struct tcp_sock, ecn_flags);
		break;
	default:
		bpf_log(log, "no write support to tcp_sock at off %d\n", off);
		return -EACCES;
	}

	if (off + size > end) {
		bpf_log(log,
			"write access at off %d with size %d beyond the member of tcp_sock ended at %zu\n",
			off, size, end);
		return -EACCES;
	}

	return NOT_INIT;
}

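/* Send a pure ACK carrying @rcv_nxt.  This lets a CC (e.g. a dctcp-style
 * one) ack immediately with an older rcv_nxt when its CE state changes.
 */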
BPF_CALL_2(bpf_tcp_send_ack, struct tcp_sock *, tp, u32, rcv_nxt)
{
	/* bpf_tcp_ca prog cannot have NULL tp */
	__tcp_send_ack((struct sock *)tp, rcv_nxt);
	return 0;
}

static const struct bpf_func_proto bpf_tcp_send_ack_proto = {
	.func		= bpf_tcp_send_ack,
	.gpl_only	= false,
	/* In case we want to report error later */
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &tcp_sock_id,
	.arg2_type	= ARG_ANYTHING,
};

static const struct bpf_func_proto *
bpf_tcp_ca_get_func_proto(enum bpf_func_id func_id,
			  const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_tcp_send_ack:
		return &bpf_tcp_send_ack_proto;
	case BPF_FUNC_sk_storage_get:
		return &bpf_sk_storage_get_proto;
	case BPF_FUNC_sk_storage_delete:
		return &bpf_sk_storage_delete_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

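/* Kernel functions that a bpf CC may call directly (kfuncs).  The
 * built-in CC entries let a bpf CC reuse e.g. cubic's or dctcp's
 * implementation; the ifdefs track the configs where calling kernel
 * functions from BPF is supported.
 */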
BTF_SET_START(bpf_tcp_ca_kfunc_ids)
BTF_ID(func, tcp_reno_ssthresh)
BTF_ID(func, tcp_reno_cong_avoid)
BTF_ID(func, tcp_reno_undo_cwnd)
BTF_ID(func, tcp_slow_start)
BTF_ID(func, tcp_cong_avoid_ai)
#ifdef CONFIG_X86
#ifdef CONFIG_DYNAMIC_FTRACE
#if IS_BUILTIN(CONFIG_TCP_CONG_CUBIC)
BTF_ID(func, cubictcp_init)
BTF_ID(func, cubictcp_recalc_ssthresh)
BTF_ID(func, cubictcp_cong_avoid)
BTF_ID(func, cubictcp_state)
BTF_ID(func, cubictcp_cwnd_event)
BTF_ID(func, cubictcp_acked)
#endif
#if IS_BUILTIN(CONFIG_TCP_CONG_DCTCP)
BTF_ID(func, dctcp_init)
BTF_ID(func, dctcp_update_alpha)
BTF_ID(func, dctcp_cwnd_event)
BTF_ID(func, dctcp_ssthresh)
BTF_ID(func, dctcp_cwnd_undo)
BTF_ID(func, dctcp_state)
#endif
#if IS_BUILTIN(CONFIG_TCP_CONG_BBR)
BTF_ID(func, bbr_init)
BTF_ID(func, bbr_main)
BTF_ID(func, bbr_sndbuf_expand)
BTF_ID(func, bbr_undo_cwnd)
BTF_ID(func, bbr_cwnd_event)
BTF_ID(func, bbr_ssthresh)
BTF_ID(func, bbr_min_tso_segs)
BTF_ID(func, bbr_set_state)
#endif
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_X86 */
BTF_SET_END(bpf_tcp_ca_kfunc_ids)

static bool bpf_tcp_ca_check_kfunc_call(u32 kfunc_btf_id)
{
	return btf_id_set_contains(&bpf_tcp_ca_kfunc_ids, kfunc_btf_id);
}

static const struct bpf_verifier_ops bpf_tcp_ca_verifier_ops = {
	.get_func_proto		= bpf_tcp_ca_get_func_proto,
	.is_valid_access	= bpf_tcp_ca_is_valid_access,
	.btf_struct_access	= bpf_tcp_ca_btf_struct_access,
	.check_kfunc_call	= bpf_tcp_ca_check_kfunc_call,
};

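/* Copy/validate one tcp_congestion_ops member from the user-provided
 * image (udata) into the kernel copy (kdata).  Returning 1 means the
 * member was fully handled here; 0 defers to the generic struct_ops
 * code; negative is an error.
 */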
static int bpf_tcp_ca_init_member(const struct btf_type *t,
				  const struct btf_member *member,
				  void *kdata, const void *udata)
{
	const struct tcp_congestion_ops *utcp_ca;
	struct tcp_congestion_ops *tcp_ca;
	int prog_fd;
	u32 moff;

	utcp_ca = (const struct tcp_congestion_ops *)udata;
	tcp_ca = (struct tcp_congestion_ops *)kdata;

	moff = btf_member_bit_offset(t, member) / 8;
	switch (moff) {
	case offsetof(struct tcp_congestion_ops, flags):
		if (utcp_ca->flags & ~TCP_CONG_MASK)
			return -EINVAL;
		tcp_ca->flags = utcp_ca->flags;
		return 1;
	case offsetof(struct tcp_congestion_ops, name):
		if (bpf_obj_name_cpy(tcp_ca->name, utcp_ca->name,
				     sizeof(tcp_ca->name)) <= 0)
			return -EINVAL;
		if (tcp_ca_find(utcp_ca->name))
			return -EEXIST;
		return 1;
	}

	if (!btf_type_resolve_func_ptr(btf_vmlinux, member->type, NULL))
		return 0;

	/* Ensure bpf_prog is provided for compulsory func ptr */
	prog_fd = (int)(*(unsigned long *)(udata + moff));
	if (!prog_fd && !is_optional(moff) && !is_unsupported(moff))
		return -EINVAL;

	return 0;
}

static int bpf_tcp_ca_check_member(const struct btf_type *t,
				   const struct btf_member *member)
{
	if (is_unsupported(btf_member_bit_offset(t, member) / 8))
		return -ENOTSUPP;
	return 0;
}

static int bpf_tcp_ca_reg(void *kdata)
{
	return tcp_register_congestion_control(kdata);
}

static void bpf_tcp_ca_unreg(void *kdata)
{
	tcp_unregister_congestion_control(kdata);
}

/* Avoid sparse warning. It is only used in bpf_struct_ops.c. */
extern struct bpf_struct_ops bpf_tcp_congestion_ops;

struct bpf_struct_ops bpf_tcp_congestion_ops = {
	.verifier_ops = &bpf_tcp_ca_verifier_ops,
	.reg = bpf_tcp_ca_reg,
	.unreg = bpf_tcp_ca_unreg,
	.check_member = bpf_tcp_ca_check_member,
	.init_member = bpf_tcp_ca_init_member,
	.init = bpf_tcp_ca_init,
	.name = "tcp_congestion_ops",
};
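
/* Usage sketch from the BPF side (illustrative only; "my_cc" and the
 * my_*() progs are hypothetical, not part of this file).  With libbpf,
 * a CC is declared as a ".struct_ops" map whose members point at
 * struct_ops programs, then registered via bpf_map__attach_struct_ops():
 *
 *	SEC("struct_ops/my_ssthresh")
 *	__u32 BPF_PROG(my_ssthresh, struct sock *sk)
 *	{
 *		// The plain cast works because bpf_tcp_ca_is_valid_access()
 *		// promotes the "struct sock" arg to "struct tcp_sock".
 *		return ((struct tcp_sock *)sk)->snd_ssthresh;
 *	}
 *
 *	SEC(".struct_ops")
 *	struct tcp_congestion_ops my_cc = {
 *		.ssthresh	= (void *)my_ssthresh,
 *		.cong_avoid	= (void *)my_cong_avoid,
 *		.undo_cwnd	= (void *)my_undo_cwnd,
 *		.name		= "my_cc",
 *	};
 *
 * bpf_tcp_ca_init_member() above copies "name"/"flags" and checks that
 * every compulsory func ptr has a prog; bpf_tcp_ca_reg() then hands the
 * kernel copy to tcp_register_congestion_control().
 */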