// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates.*/

#define _GNU_SOURCE
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <test_progs.h>
#include "cgrp_ls_tp_btf.skel.h"
#include "cgrp_ls_recursion.skel.h"
#include "cgrp_ls_attach_cgroup.skel.h"
#include "cgrp_ls_negative.skel.h"
#include "cgrp_ls_sleepable.skel.h"
#include "network_helpers.h"
#include "cgroup_helpers.h"

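/* Userspace mirror of the value stored in the socket_cookies cgroup storage
 * map by the cgrp_ls_attach_cgroup BPF programs (assumed to match the
 * BPF-side struct of the same name).
 */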
struct socket_cookie {
	__u64 cookie_key;
	__u64 cookie_value;
};

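/* Test mode shared by the subtests: whether we target a cgroup v1 hierarchy,
 * and if so which hierarchy id the BPF programs should use.
 */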
static bool is_cgroup1;
static int target_hid;

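/* Copy the current mode into the skeleton's global data so the BPF side can
 * pick the matching cgroup v1/v2 lookup path.
 */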
#define CGROUP_MODE_SET(skel)			\
{						\
	skel->bss->is_cgroup1 = is_cgroup1;	\
	skel->bss->target_hid = target_hid;	\
}

static void cgroup_mode_value_init(bool cgroup, int hid)
{
	is_cgroup1 = cgroup;
	target_hid = hid;
}

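/* Update/lookup/delete a cgroup local storage element from user space, then
 * attach the tp_btf sys_enter/sys_exit programs and check that they observed
 * consistent storage values for the target thread (3 syscalls in total: one
 * from attach plus two gettid calls, as counted below).
 */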
static void test_tp_btf(int cgroup_fd)
{
	struct cgrp_ls_tp_btf *skel;
	long val1 = 1, val2 = 0;
	int err;

	skel = cgrp_ls_tp_btf__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
		return;

	CGROUP_MODE_SET(skel);

	/* populate a value in map_b */
	err = bpf_map_update_elem(bpf_map__fd(skel->maps.map_b), &cgroup_fd, &val1, BPF_ANY);
	if (!ASSERT_OK(err, "map_update_elem"))
		goto out;

	/* check value */
	err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.map_b), &cgroup_fd, &val2);
	if (!ASSERT_OK(err, "map_lookup_elem"))
		goto out;
	if (!ASSERT_EQ(val2, 1, "map_lookup_elem, invalid val"))
		goto out;

	/* delete value */
	err = bpf_map_delete_elem(bpf_map__fd(skel->maps.map_b), &cgroup_fd);
	if (!ASSERT_OK(err, "map_delete_elem"))
		goto out;

	skel->bss->target_pid = sys_gettid();

	err = cgrp_ls_tp_btf__attach(skel);
	if (!ASSERT_OK(err, "skel_attach"))
		goto out;

	sys_gettid();
	sys_gettid();

	skel->bss->target_pid = 0;

	/* 3x syscalls: 1x attach and 2x gettid */
	ASSERT_EQ(skel->bss->enter_cnt, 3, "enter_cnt");
	ASSERT_EQ(skel->bss->exit_cnt, 3, "exit_cnt");
	ASSERT_EQ(skel->bss->mismatch_cnt, 0, "mismatch_cnt");
out:
	cgrp_ls_tp_btf__destroy(skel);
}

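/* Attach the cgroup sock_create/sockops programs plus a tracing program, make
 * a loopback TCP connection inside the cgroup, and verify the cookie value
 * the BPF side composed in cgroup local storage: expected to equal
 * (client port << 8) | 0xFF, as asserted at the end.
 */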
static void test_attach_cgroup(int cgroup_fd)
{
	int server_fd = 0, client_fd = 0, err = 0;
	socklen_t addr_len = sizeof(struct sockaddr_in6);
	struct cgrp_ls_attach_cgroup *skel;
	__u32 cookie_expected_value;
	struct sockaddr_in6 addr;
	struct socket_cookie val;

	skel = cgrp_ls_attach_cgroup__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	skel->links.set_cookie = bpf_program__attach_cgroup(
		skel->progs.set_cookie, cgroup_fd);
	if (!ASSERT_OK_PTR(skel->links.set_cookie, "prog_attach"))
		goto out;

	skel->links.update_cookie_sockops = bpf_program__attach_cgroup(
		skel->progs.update_cookie_sockops, cgroup_fd);
	if (!ASSERT_OK_PTR(skel->links.update_cookie_sockops, "prog_attach"))
		goto out;

	skel->links.update_cookie_tracing = bpf_program__attach(
		skel->progs.update_cookie_tracing);
	if (!ASSERT_OK_PTR(skel->links.update_cookie_tracing, "prog_attach"))
		goto out;

	server_fd = start_server(AF_INET6, SOCK_STREAM, "::1", 0, 0);
	if (!ASSERT_GE(server_fd, 0, "start_server"))
		goto out;

	client_fd = connect_to_fd(server_fd, 0);
	if (!ASSERT_GE(client_fd, 0, "connect_to_fd"))
		goto close_server_fd;

	err = bpf_map_lookup_elem(bpf_map__fd(skel->maps.socket_cookies),
				  &cgroup_fd, &val);
	if (!ASSERT_OK(err, "map_lookup(socket_cookies)"))
		goto close_client_fd;

	err = getsockname(client_fd, (struct sockaddr *)&addr, &addr_len);
	if (!ASSERT_OK(err, "getsockname"))
		goto close_client_fd;

	cookie_expected_value = (ntohs(addr.sin6_port) << 8) | 0xFF;
	ASSERT_EQ(val.cookie_value, cookie_expected_value, "cookie_value");

close_client_fd:
	close(client_fd);
close_server_fd:
	close(server_fd);
out:
	cgrp_ls_attach_cgroup__destroy(skel);
}

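/* cgrp_ls_recursion attaches programs whose cgroup storage accesses can nest
 * (see the BPF-side programs); a single triggering syscall must complete
 * without deadlocking.
 */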
static void test_recursion(int cgroup_fd)
{
	struct cgrp_ls_recursion *skel;
	int err;

	skel = cgrp_ls_recursion__open_and_load();
	if (!ASSERT_OK_PTR(skel, "skel_open_and_load"))
		return;

	CGROUP_MODE_SET(skel);

	err = cgrp_ls_recursion__attach(skel);
	if (!ASSERT_OK(err, "skel_attach"))
		goto out;

	/* trigger sys_enter, make sure it does not cause deadlock */
	sys_gettid();

out:
	cgrp_ls_recursion__destroy(skel);
}

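/* Loading the cgrp_ls_negative skeleton is expected to fail verification;
 * only the error from open_and_load is checked here.
 */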
static void test_negative(void)
{
	struct cgrp_ls_negative *skel;

	skel = cgrp_ls_negative__open_and_load();
	if (!ASSERT_ERR_PTR(skel, "skel_open_and_load")) {
		cgrp_ls_negative__destroy(skel);
		return;
	}
}

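/* Attach the sleepable cgroup_iter program to the target cgroup only
 * (BPF_CGROUP_ITER_SELF_ONLY), trigger it by reading the iterator fd, and
 * check that it recorded the expected cgroup id.
 */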
static void test_cgroup_iter_sleepable(int cgroup_fd, __u64 cgroup_id)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	union bpf_iter_link_info linfo;
	struct cgrp_ls_sleepable *skel;
	struct bpf_link *link;
	int err, iter_fd;
	char buf[16];

	skel = cgrp_ls_sleepable__open();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	CGROUP_MODE_SET(skel);

	bpf_program__set_autoload(skel->progs.cgroup_iter, true);
	err = cgrp_ls_sleepable__load(skel);
	if (!ASSERT_OK(err, "skel_load"))
		goto out;

	memset(&linfo, 0, sizeof(linfo));
	linfo.cgroup.cgroup_fd = cgroup_fd;
	linfo.cgroup.order = BPF_CGROUP_ITER_SELF_ONLY;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.cgroup_iter, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "iter_create"))
		goto out;

	/* trigger the program run */
	(void)read(iter_fd, buf, sizeof(buf));

	ASSERT_EQ(skel->bss->cgroup_id, cgroup_id, "cgroup_id");

	close(iter_fd);
out:
	cgrp_ls_sleepable__destroy(skel);
}

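/* yes_rcu_lock accesses cgroup local storage from a sleepable program under
 * an explicit RCU read-side section (per its name); it should load, attach,
 * and report the expected cgroup id once the target task makes a syscall.
 */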
static void test_yes_rcu_lock(__u64 cgroup_id)
{
	struct cgrp_ls_sleepable *skel;
	int err;

	skel = cgrp_ls_sleepable__open();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	CGROUP_MODE_SET(skel);
	skel->bss->target_pid = sys_gettid();

	bpf_program__set_autoload(skel->progs.yes_rcu_lock, true);
	err = cgrp_ls_sleepable__load(skel);
	if (!ASSERT_OK(err, "skel_load"))
		goto out;

	err = cgrp_ls_sleepable__attach(skel);
	if (!ASSERT_OK(err, "skel_attach"))
		goto out;

	syscall(SYS_getpgid);

	ASSERT_EQ(skel->bss->cgroup_id, cgroup_id, "cgroup_id");
out:
	cgrp_ls_sleepable__destroy(skel);
}

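/* no_rcu_lock touches the cgroup pointer from a sleepable program without
 * RCU protection, so the verifier is expected to reject it at load time.
 */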
static void test_no_rcu_lock(void)
{
	struct cgrp_ls_sleepable *skel;
	int err;

	skel = cgrp_ls_sleepable__open();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	CGROUP_MODE_SET(skel);

	bpf_program__set_autoload(skel->progs.no_rcu_lock, true);
	err = cgrp_ls_sleepable__load(skel);
	ASSERT_ERR(err, "skel_load");

	cgrp_ls_sleepable__destroy(skel);
}

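/* The cgroup v1 variant is expected to load successfully even without an RCU
 * read lock (presumably because the cgroup1 lookup used on the BPF side hands
 * back a referenced cgroup pointer).
 */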
static void test_cgrp1_no_rcu_lock(void)
{
	struct cgrp_ls_sleepable *skel;
	int err;

	skel = cgrp_ls_sleepable__open();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	CGROUP_MODE_SET(skel);

	bpf_program__set_autoload(skel->progs.cgrp1_no_rcu_lock, true);
	err = cgrp_ls_sleepable__load(skel);
	ASSERT_OK(err, "skel_load");

	cgrp_ls_sleepable__destroy(skel);
}

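/* Run the subtests against a cgroup v2 cgroup created and joined via
 * test__join_cgroup("/cgrp_local_storage").
 */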
static void cgrp2_local_storage(void)
{
	__u64 cgroup_id;
	int cgroup_fd;

	cgroup_fd = test__join_cgroup("/cgrp_local_storage");
	if (!ASSERT_GE(cgroup_fd, 0, "join_cgroup /cgrp_local_storage"))
		return;

	cgroup_mode_value_init(0, -1);

	cgroup_id = get_cgroup_id("/cgrp_local_storage");
	if (test__start_subtest("tp_btf"))
		test_tp_btf(cgroup_fd);
	if (test__start_subtest("attach_cgroup"))
		test_attach_cgroup(cgroup_fd);
	if (test__start_subtest("recursion"))
		test_recursion(cgroup_fd);
	if (test__start_subtest("negative"))
		test_negative();
	if (test__start_subtest("cgroup_iter_sleepable"))
		test_cgroup_iter_sleepable(cgroup_fd, cgroup_id);
	if (test__start_subtest("yes_rcu_lock"))
		test_yes_rcu_lock(cgroup_id);
	if (test__start_subtest("no_rcu_lock"))
		test_no_rcu_lock();

	close(cgroup_fd);
}

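/* Set up and join a net_cls cgroup v1 hierarchy, then rerun the storage
 * subtests keyed by the cgroup1 hierarchy id.
 */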
static void cgrp1_local_storage(void)
{
	int cgrp1_fd, cgrp1_hid, cgrp1_id, err;

	/* Setup cgroup1 hierarchy */
	err = setup_classid_environment();
	if (!ASSERT_OK(err, "setup_classid_environment"))
		return;

	err = join_classid();
	if (!ASSERT_OK(err, "join_cgroup1"))
		goto cleanup;

	cgrp1_fd = open_classid();
	if (!ASSERT_GE(cgrp1_fd, 0, "cgroup1 fd"))
		goto cleanup;

	cgrp1_id = get_classid_cgroup_id();
	if (!ASSERT_GE(cgrp1_id, 0, "cgroup1 id"))
		goto close_fd;

	cgrp1_hid = get_cgroup1_hierarchy_id("net_cls");
	if (!ASSERT_GE(cgrp1_hid, 0, "cgroup1 hid"))
		goto close_fd;

	cgroup_mode_value_init(1, cgrp1_hid);

	if (test__start_subtest("cgrp1_tp_btf"))
		test_tp_btf(cgrp1_fd);
	if (test__start_subtest("cgrp1_recursion"))
		test_recursion(cgrp1_fd);
	if (test__start_subtest("cgrp1_negative"))
		test_negative();
	if (test__start_subtest("cgrp1_iter_sleepable"))
		test_cgroup_iter_sleepable(cgrp1_fd, cgrp1_id);
	if (test__start_subtest("cgrp1_yes_rcu_lock"))
		test_yes_rcu_lock(cgrp1_id);
	if (test__start_subtest("cgrp1_no_rcu_lock"))
		test_cgrp1_no_rcu_lock();

close_fd:
	close(cgrp1_fd);
cleanup:
	cleanup_classid_environment();
}

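/* Test entry point: exercise cgroup local storage for both cgroup v2 and
 * cgroup v1.
 */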
void test_cgrp_local_storage(void)
{
	cgrp2_local_storage();
	cgrp1_local_storage();
}