net/ipv4/bpf_tcp_ca.c (v5.9)
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook  */

#include <linux/types.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <net/tcp.h>
#include <net/bpf_sk_storage.h>

static u32 optional_ops[] = {
	offsetof(struct tcp_congestion_ops, init),
	offsetof(struct tcp_congestion_ops, release),
	offsetof(struct tcp_congestion_ops, set_state),
	offsetof(struct tcp_congestion_ops, cwnd_event),
	offsetof(struct tcp_congestion_ops, in_ack_event),
	offsetof(struct tcp_congestion_ops, pkts_acked),
	offsetof(struct tcp_congestion_ops, min_tso_segs),
	offsetof(struct tcp_congestion_ops, sndbuf_expand),
	offsetof(struct tcp_congestion_ops, cong_control),
};

static u32 unsupported_ops[] = {
	offsetof(struct tcp_congestion_ops, get_info),
};

static const struct btf_type *tcp_sock_type;
static u32 tcp_sock_id, sock_id;

static int btf_sk_storage_get_ids[5];
static struct bpf_func_proto btf_sk_storage_get_proto __read_mostly;

static int btf_sk_storage_delete_ids[5];
static struct bpf_func_proto btf_sk_storage_delete_proto __read_mostly;

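/*
 * Clone a socket-taking helper proto and retarget its ARG_PTR_TO_SOCKET
 * arguments to ARG_PTR_TO_BTF_ID of tcp_sock, so a bpf-tcp-cc prog can
 * pass the tcp_sock pointer it already holds straight to the helper.
 */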
static void convert_sk_func_proto(struct bpf_func_proto *to, int *to_btf_ids,
				  const struct bpf_func_proto *from)
{
	int i;

	*to = *from;
	to->btf_id = to_btf_ids;
	for (i = 0; i < ARRAY_SIZE(to->arg_type); i++) {
		if (to->arg_type[i] == ARG_PTR_TO_SOCKET) {
			to->arg_type[i] = ARG_PTR_TO_BTF_ID;
			to->btf_id[i] = tcp_sock_id;
		}
	}
}

static int bpf_tcp_ca_init(struct btf *btf)
{
	s32 type_id;

	type_id = btf_find_by_name_kind(btf, "sock", BTF_KIND_STRUCT);
	if (type_id < 0)
		return -EINVAL;
	sock_id = type_id;

	type_id = btf_find_by_name_kind(btf, "tcp_sock", BTF_KIND_STRUCT);
	if (type_id < 0)
		return -EINVAL;
	tcp_sock_id = type_id;
	tcp_sock_type = btf_type_by_id(btf, tcp_sock_id);

	convert_sk_func_proto(&btf_sk_storage_get_proto,
			      btf_sk_storage_get_ids,
			      &bpf_sk_storage_get_proto);
	convert_sk_func_proto(&btf_sk_storage_delete_proto,
			      btf_sk_storage_delete_ids,
			      &bpf_sk_storage_delete_proto);

	return 0;
}

static bool is_optional(u32 member_offset)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(optional_ops); i++) {
		if (member_offset == optional_ops[i])
			return true;
	}

	return false;
}

static bool is_unsupported(u32 member_offset)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(unsupported_ops); i++) {
		if (member_offset == unsupported_ops[i])
			return true;
	}

	return false;
}

extern struct btf *btf_vmlinux;

static bool bpf_tcp_ca_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS)
		return false;
	if (type != BPF_READ)
		return false;
	if (off % size != 0)
		return false;

	if (!btf_ctx_access(off, size, type, prog, info))
		return false;

	if (info->reg_type == PTR_TO_BTF_ID && info->btf_id == sock_id)
		/* promote it to tcp_sock */
		info->btf_id = tcp_sock_id;

	return true;
}

static int bpf_tcp_ca_btf_struct_access(struct bpf_verifier_log *log,
					const struct btf_type *t, int off,
					int size, enum bpf_access_type atype,
					u32 *next_btf_id)
{
	size_t end;

	if (atype == BPF_READ)
		return btf_struct_access(log, t, off, size, atype, next_btf_id);

	if (t != tcp_sock_type) {
		bpf_log(log, "only read is supported\n");
		return -EACCES;
	}

	switch (off) {
	case bpf_ctx_range(struct inet_connection_sock, icsk_ca_priv):
		end = offsetofend(struct inet_connection_sock, icsk_ca_priv);
		break;
	case offsetof(struct inet_connection_sock, icsk_ack.pending):
		end = offsetofend(struct inet_connection_sock,
				  icsk_ack.pending);
		break;
	case offsetof(struct tcp_sock, snd_cwnd):
		end = offsetofend(struct tcp_sock, snd_cwnd);
		break;
	case offsetof(struct tcp_sock, snd_cwnd_cnt):
		end = offsetofend(struct tcp_sock, snd_cwnd_cnt);
		break;
	case offsetof(struct tcp_sock, snd_ssthresh):
		end = offsetofend(struct tcp_sock, snd_ssthresh);
		break;
	case offsetof(struct tcp_sock, ecn_flags):
		end = offsetofend(struct tcp_sock, ecn_flags);
		break;
	default:
		bpf_log(log, "no write support to tcp_sock at off %d\n", off);
		return -EACCES;
	}

	if (off + size > end) {
		bpf_log(log,
			"write access at off %d with size %d beyond the member of tcp_sock ended at %zu\n",
			off, size, end);
		return -EACCES;
	}

	return NOT_INIT;
}

BPF_CALL_2(bpf_tcp_send_ack, struct tcp_sock *, tp, u32, rcv_nxt)
{
	/* bpf_tcp_ca prog cannot have NULL tp */
	__tcp_send_ack((struct sock *)tp, rcv_nxt);
	return 0;
}

static const struct bpf_func_proto bpf_tcp_send_ack_proto = {
	.func		= bpf_tcp_send_ack,
	.gpl_only	= false,
	/* In case we want to report error later */
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg2_type	= ARG_ANYTHING,
	.btf_id		= &tcp_sock_id,
};

static const struct bpf_func_proto *
bpf_tcp_ca_get_func_proto(enum bpf_func_id func_id,
			  const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_tcp_send_ack:
		return &bpf_tcp_send_ack_proto;
	case BPF_FUNC_sk_storage_get:
		return &btf_sk_storage_get_proto;
	case BPF_FUNC_sk_storage_delete:
		return &btf_sk_storage_delete_proto;
	default:
		return bpf_base_func_proto(func_id);
	}
}

static const struct bpf_verifier_ops bpf_tcp_ca_verifier_ops = {
	.get_func_proto		= bpf_tcp_ca_get_func_proto,
	.is_valid_access	= bpf_tcp_ca_is_valid_access,
	.btf_struct_access	= bpf_tcp_ca_btf_struct_access,
};

static int bpf_tcp_ca_init_member(const struct btf_type *t,
				  const struct btf_member *member,
				  void *kdata, const void *udata)
{
	const struct tcp_congestion_ops *utcp_ca;
	struct tcp_congestion_ops *tcp_ca;
	int prog_fd;
	u32 moff;

	utcp_ca = (const struct tcp_congestion_ops *)udata;
	tcp_ca = (struct tcp_congestion_ops *)kdata;

	moff = btf_member_bit_offset(t, member) / 8;
	switch (moff) {
	case offsetof(struct tcp_congestion_ops, flags):
		if (utcp_ca->flags & ~TCP_CONG_MASK)
			return -EINVAL;
		tcp_ca->flags = utcp_ca->flags;
		return 1;
	case offsetof(struct tcp_congestion_ops, name):
		if (bpf_obj_name_cpy(tcp_ca->name, utcp_ca->name,
				     sizeof(tcp_ca->name)) <= 0)
			return -EINVAL;
		if (tcp_ca_find(utcp_ca->name))
			return -EEXIST;
		return 1;
	}

	if (!btf_type_resolve_func_ptr(btf_vmlinux, member->type, NULL))
		return 0;

	/* Ensure bpf_prog is provided for compulsory func ptr */
	prog_fd = (int)(*(unsigned long *)(udata + moff));
	if (!prog_fd && !is_optional(moff) && !is_unsupported(moff))
		return -EINVAL;

	return 0;
}

static int bpf_tcp_ca_check_member(const struct btf_type *t,
				   const struct btf_member *member)
{
	if (is_unsupported(btf_member_bit_offset(t, member) / 8))
		return -ENOTSUPP;
	return 0;
}

static int bpf_tcp_ca_reg(void *kdata)
{
	return tcp_register_congestion_control(kdata);
}

static void bpf_tcp_ca_unreg(void *kdata)
{
	tcp_unregister_congestion_control(kdata);
}

/* Avoid sparse warning.  It is only used in bpf_struct_ops.c. */
extern struct bpf_struct_ops bpf_tcp_congestion_ops;

struct bpf_struct_ops bpf_tcp_congestion_ops = {
	.verifier_ops = &bpf_tcp_ca_verifier_ops,
	.reg = bpf_tcp_ca_reg,
	.unreg = bpf_tcp_ca_unreg,
	.check_member = bpf_tcp_ca_check_member,
	.init_member = bpf_tcp_ca_init_member,
	.init = bpf_tcp_ca_init,
	.name = "tcp_congestion_ops",
};
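
For context, a bpf-tcp-cc fills this tcp_congestion_ops from BPF program text: it implements the compulsory hooks (everything not in optional_ops above) and declares the ops table in a ".struct_ops" ELF section. A minimal sketch, modeled on the kernel selftests; the names (simple_cc, bpf_simple_*) are illustrative, and it assumes a vmlinux.h generated from this kernel's BTF:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

char _license[] SEC("license") = "GPL";

SEC("struct_ops/bpf_simple_ssthresh")
__u32 BPF_PROG(bpf_simple_ssthresh, struct sock *sk)
{
	/* bpf_tcp_ca_is_valid_access() has promoted sk's BTF id to
	 * tcp_sock, so this cast is verifier-safe. */
	const struct tcp_sock *tp = (const struct tcp_sock *)sk;

	return tp->snd_ssthresh;
}

SEC("struct_ops/bpf_simple_undo_cwnd")
__u32 BPF_PROG(bpf_simple_undo_cwnd, struct sock *sk)
{
	const struct tcp_sock *tp = (const struct tcp_sock *)sk;

	return tp->snd_cwnd;
}

SEC("struct_ops/bpf_simple_cong_avoid")
void BPF_PROG(bpf_simple_cong_avoid, struct sock *sk, __u32 ack, __u32 acked)
{
	struct tcp_sock *tp = (struct tcp_sock *)sk;

	/* snd_cwnd is on the write allowlist enforced by
	 * bpf_tcp_ca_btf_struct_access() above. */
	tp->snd_cwnd += acked;
}

SEC(".struct_ops")
struct tcp_congestion_ops simple_cc = {
	.ssthresh	= (void *)bpf_simple_ssthresh,
	.undo_cwnd	= (void *)bpf_simple_undo_cwnd,
	.cong_avoid	= (void *)bpf_simple_cong_avoid,
	.name		= "bpf_simple_cc",
};

The .name field goes through the name case of bpf_tcp_ca_init_member() above, so it must be non-empty and not collide with an already-registered congestion control.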
net/ipv4/bpf_tcp_ca.c (v6.9.4)
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook  */

#include <linux/init.h>
#include <linux/types.h>
#include <linux/bpf_verifier.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/filter.h>
#include <net/tcp.h>
#include <net/bpf_sk_storage.h>

/* "extern" is to avoid sparse warning.  It is only used in bpf_struct_ops.c. */
static struct bpf_struct_ops bpf_tcp_congestion_ops;

static u32 unsupported_ops[] = {
	offsetof(struct tcp_congestion_ops, get_info),
};

static const struct btf_type *tcp_sock_type;
static u32 tcp_sock_id, sock_id;
static const struct btf_type *tcp_congestion_ops_type;

static int bpf_tcp_ca_init(struct btf *btf)
{
	s32 type_id;

	type_id = btf_find_by_name_kind(btf, "sock", BTF_KIND_STRUCT);
	if (type_id < 0)
		return -EINVAL;
	sock_id = type_id;

	type_id = btf_find_by_name_kind(btf, "tcp_sock", BTF_KIND_STRUCT);
	if (type_id < 0)
		return -EINVAL;
	tcp_sock_id = type_id;
	tcp_sock_type = btf_type_by_id(btf, tcp_sock_id);

	type_id = btf_find_by_name_kind(btf, "tcp_congestion_ops", BTF_KIND_STRUCT);
	if (type_id < 0)
		return -EINVAL;
	tcp_congestion_ops_type = btf_type_by_id(btf, type_id);

	return 0;
}

static bool is_unsupported(u32 member_offset)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(unsupported_ops); i++) {
		if (member_offset == unsupported_ops[i])
			return true;
	}

	return false;
}

static bool bpf_tcp_ca_is_valid_access(int off, int size,
				       enum bpf_access_type type,
				       const struct bpf_prog *prog,
				       struct bpf_insn_access_aux *info)
{
	if (!bpf_tracing_btf_ctx_access(off, size, type, prog, info))
		return false;

	if (base_type(info->reg_type) == PTR_TO_BTF_ID &&
	    !bpf_type_has_unsafe_modifiers(info->reg_type) &&
	    info->btf_id == sock_id)
		/* promote it to tcp_sock */
		info->btf_id = tcp_sock_id;

	return true;
}

static int bpf_tcp_ca_btf_struct_access(struct bpf_verifier_log *log,
					const struct bpf_reg_state *reg,
					int off, int size)
{
	const struct btf_type *t;
	size_t end;

	t = btf_type_by_id(reg->btf, reg->btf_id);
	if (t != tcp_sock_type) {
		bpf_log(log, "only read is supported\n");
		return -EACCES;
	}

	switch (off) {
	case offsetof(struct sock, sk_pacing_rate):
		end = offsetofend(struct sock, sk_pacing_rate);
		break;
	case offsetof(struct sock, sk_pacing_status):
		end = offsetofend(struct sock, sk_pacing_status);
		break;
	case bpf_ctx_range(struct inet_connection_sock, icsk_ca_priv):
		end = offsetofend(struct inet_connection_sock, icsk_ca_priv);
		break;
	case offsetof(struct inet_connection_sock, icsk_ack.pending):
		end = offsetofend(struct inet_connection_sock,
				  icsk_ack.pending);
		break;
	case offsetof(struct tcp_sock, snd_cwnd):
		end = offsetofend(struct tcp_sock, snd_cwnd);
		break;
	case offsetof(struct tcp_sock, snd_cwnd_cnt):
		end = offsetofend(struct tcp_sock, snd_cwnd_cnt);
		break;
	case offsetof(struct tcp_sock, snd_ssthresh):
		end = offsetofend(struct tcp_sock, snd_ssthresh);
		break;
	case offsetof(struct tcp_sock, ecn_flags):
		end = offsetofend(struct tcp_sock, ecn_flags);
		break;
	case offsetof(struct tcp_sock, app_limited):
		end = offsetofend(struct tcp_sock, app_limited);
		break;
	default:
		bpf_log(log, "no write support to tcp_sock at off %d\n", off);
		return -EACCES;
	}

	if (off + size > end) {
		bpf_log(log,
			"write access at off %d with size %d beyond the member of tcp_sock ended at %zu\n",
			off, size, end);
		return -EACCES;
	}

	return 0;
}

BPF_CALL_2(bpf_tcp_send_ack, struct tcp_sock *, tp, u32, rcv_nxt)
{
	/* bpf_tcp_ca prog cannot have NULL tp */
	__tcp_send_ack((struct sock *)tp, rcv_nxt);
	return 0;
}

static const struct bpf_func_proto bpf_tcp_send_ack_proto = {
	.func		= bpf_tcp_send_ack,
	.gpl_only	= false,
	/* In case we want to report error later */
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_BTF_ID,
	.arg1_btf_id	= &tcp_sock_id,
	.arg2_type	= ARG_ANYTHING,
};

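/*
 * A struct_ops prog's expected_attach_type records the index of the
 * tcp_congestion_ops member it implements; translate that index back
 * into the member's byte offset.
 */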
static u32 prog_ops_moff(const struct bpf_prog *prog)
{
	const struct btf_member *m;
	const struct btf_type *t;
	u32 midx;

	midx = prog->expected_attach_type;
	t = tcp_congestion_ops_type;
	m = &btf_type_member(t)[midx];

	return __btf_member_bit_offset(t, m) / 8;
}

static const struct bpf_func_proto *
bpf_tcp_ca_get_func_proto(enum bpf_func_id func_id,
			  const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_tcp_send_ack:
		return &bpf_tcp_send_ack_proto;
	case BPF_FUNC_sk_storage_get:
		return &bpf_sk_storage_get_proto;
	case BPF_FUNC_sk_storage_delete:
		return &bpf_sk_storage_delete_proto;
	case BPF_FUNC_setsockopt:
		/* Does not allow release() to call setsockopt.
		 * release() is called when the current bpf-tcp-cc
		 * is retiring.  It is not allowed to call
		 * setsockopt() to make further changes which
		 * may potentially allocate new resources.
		 */
		if (prog_ops_moff(prog) !=
		    offsetof(struct tcp_congestion_ops, release))
			return &bpf_sk_setsockopt_proto;
		return NULL;
	case BPF_FUNC_getsockopt:
		/* Since get/setsockopt is usually expected to
		 * be available together, disable getsockopt for
		 * release also to avoid usage surprise.
		 * The bpf-tcp-cc already has a more powerful way
		 * to read tcp_sock from the PTR_TO_BTF_ID.
		 */
		if (prog_ops_moff(prog) !=
		    offsetof(struct tcp_congestion_ops, release))
			return &bpf_sk_getsockopt_proto;
		return NULL;
	case BPF_FUNC_ktime_get_coarse_ns:
		return &bpf_ktime_get_coarse_ns_proto;
	default:
		return bpf_base_func_proto(func_id, prog);
	}
}

BTF_KFUNCS_START(bpf_tcp_ca_check_kfunc_ids)
BTF_ID_FLAGS(func, tcp_reno_ssthresh)
BTF_ID_FLAGS(func, tcp_reno_cong_avoid)
BTF_ID_FLAGS(func, tcp_reno_undo_cwnd)
BTF_ID_FLAGS(func, tcp_slow_start)
BTF_ID_FLAGS(func, tcp_cong_avoid_ai)
BTF_KFUNCS_END(bpf_tcp_ca_check_kfunc_ids)

static const struct btf_kfunc_id_set bpf_tcp_ca_kfunc_set = {
	.owner = THIS_MODULE,
	.set   = &bpf_tcp_ca_check_kfunc_ids,
};

static const struct bpf_verifier_ops bpf_tcp_ca_verifier_ops = {
	.get_func_proto		= bpf_tcp_ca_get_func_proto,
	.is_valid_access	= bpf_tcp_ca_is_valid_access,
	.btf_struct_access	= bpf_tcp_ca_btf_struct_access,
};

static int bpf_tcp_ca_init_member(const struct btf_type *t,
				  const struct btf_member *member,
				  void *kdata, const void *udata)
{
	const struct tcp_congestion_ops *utcp_ca;
	struct tcp_congestion_ops *tcp_ca;
	u32 moff;

	utcp_ca = (const struct tcp_congestion_ops *)udata;
	tcp_ca = (struct tcp_congestion_ops *)kdata;

	moff = __btf_member_bit_offset(t, member) / 8;
	switch (moff) {
	case offsetof(struct tcp_congestion_ops, flags):
		if (utcp_ca->flags & ~TCP_CONG_MASK)
			return -EINVAL;
		tcp_ca->flags = utcp_ca->flags;
		return 1;
	case offsetof(struct tcp_congestion_ops, name):
		if (bpf_obj_name_cpy(tcp_ca->name, utcp_ca->name,
				     sizeof(tcp_ca->name)) <= 0)
			return -EINVAL;
		return 1;
	}

	return 0;
}

static int bpf_tcp_ca_check_member(const struct btf_type *t,
				   const struct btf_member *member,
				   const struct bpf_prog *prog)
{
	if (is_unsupported(__btf_member_bit_offset(t, member) / 8))
		return -ENOTSUPP;
	return 0;
}

static int bpf_tcp_ca_reg(void *kdata)
{
	return tcp_register_congestion_control(kdata);
}

static void bpf_tcp_ca_unreg(void *kdata)
{
	tcp_unregister_congestion_control(kdata);
}

static int bpf_tcp_ca_update(void *kdata, void *old_kdata)
{
	return tcp_update_congestion_control(kdata, old_kdata);
}

static int bpf_tcp_ca_validate(void *kdata)
{
	return tcp_validate_congestion_control(kdata);
}

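/*
 * The dummy bodies below give every hook a correctly-typed stub; they
 * populate .cfi_stubs so that indirect calls into the BPF-implemented
 * ops carry valid kernel CFI (kCFI) type information.
 */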
static u32 bpf_tcp_ca_ssthresh(struct sock *sk)
{
	return 0;
}

static void bpf_tcp_ca_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{
}

static void bpf_tcp_ca_set_state(struct sock *sk, u8 new_state)
{
}

static void bpf_tcp_ca_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
{
}

static void bpf_tcp_ca_in_ack_event(struct sock *sk, u32 flags)
{
}

static void bpf_tcp_ca_pkts_acked(struct sock *sk, const struct ack_sample *sample)
{
}

static u32 bpf_tcp_ca_min_tso_segs(struct sock *sk)
{
	return 0;
}

static void bpf_tcp_ca_cong_control(struct sock *sk, const struct rate_sample *rs)
{
}

static u32 bpf_tcp_ca_undo_cwnd(struct sock *sk)
{
	return 0;
}

static u32 bpf_tcp_ca_sndbuf_expand(struct sock *sk)
{
	return 0;
}

static void __bpf_tcp_ca_init(struct sock *sk)
{
}

static void __bpf_tcp_ca_release(struct sock *sk)
{
}

static struct tcp_congestion_ops __bpf_ops_tcp_congestion_ops = {
	.ssthresh = bpf_tcp_ca_ssthresh,
	.cong_avoid = bpf_tcp_ca_cong_avoid,
	.set_state = bpf_tcp_ca_set_state,
	.cwnd_event = bpf_tcp_ca_cwnd_event,
	.in_ack_event = bpf_tcp_ca_in_ack_event,
	.pkts_acked = bpf_tcp_ca_pkts_acked,
	.min_tso_segs = bpf_tcp_ca_min_tso_segs,
	.cong_control = bpf_tcp_ca_cong_control,
	.undo_cwnd = bpf_tcp_ca_undo_cwnd,
	.sndbuf_expand = bpf_tcp_ca_sndbuf_expand,

	.init = __bpf_tcp_ca_init,
	.release = __bpf_tcp_ca_release,
};

static struct bpf_struct_ops bpf_tcp_congestion_ops = {
	.verifier_ops = &bpf_tcp_ca_verifier_ops,
	.reg = bpf_tcp_ca_reg,
	.unreg = bpf_tcp_ca_unreg,
	.update = bpf_tcp_ca_update,
	.check_member = bpf_tcp_ca_check_member,
	.init_member = bpf_tcp_ca_init_member,
	.init = bpf_tcp_ca_init,
	.validate = bpf_tcp_ca_validate,
	.name = "tcp_congestion_ops",
	.cfi_stubs = &__bpf_ops_tcp_congestion_ops,
	.owner = THIS_MODULE,
};

static int __init bpf_tcp_ca_kfunc_init(void)
{
	int ret;

	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &bpf_tcp_ca_kfunc_set);
	ret = ret ?: register_bpf_struct_ops(&bpf_tcp_congestion_ops, tcp_congestion_ops);

	return ret;
}
late_initcall(bpf_tcp_ca_kfunc_init);
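
On the userspace side, attaching the SEC(".struct_ops") map is what ends up in bpf_tcp_ca_reg() -> tcp_register_congestion_control() above. A minimal libbpf sketch (error handling trimmed; the object file and map name follow the BPF sketch after the v5.9 listing and are assumptions):

#include <bpf/libbpf.h>
#include <unistd.h>

int main(void)
{
	struct bpf_object *obj;
	struct bpf_map *map;
	struct bpf_link *link;

	/* "simple_cc.bpf.o" is the hypothetical compiled BPF object. */
	obj = bpf_object__open_file("simple_cc.bpf.o", NULL);
	if (!obj || bpf_object__load(obj))
		return 1;

	/* The SEC(".struct_ops") map is named after the ops variable;
	 * attaching it registers the congestion control in the kernel. */
	map = bpf_object__find_map_by_name(obj, "simple_cc");
	link = bpf_map__attach_struct_ops(map);
	if (!link)
		return 1;

	/* The CC is now selectable like any built-in one, e.g.:
	 * setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "bpf_simple_cc",
	 *	      sizeof("bpf_simple_cc") - 1);
	 */
	pause();	/* keep the registration alive */
	return 0;
}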