v6.13.7
// SPDX-License-Identifier: GPL-2.0

#include <test_progs.h>
#include "cgroup_helpers.h"
#include "testing_helpers.h"
#include "test_cgroup_link.skel.h"

static __u32 duration = 0;
#define PING_CMD "ping -q -c1 -w1 127.0.0.1 > /dev/null"

static struct test_cgroup_link *skel = NULL;

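/* Run a single quiet ping to 127.0.0.1 to trigger the attached egress
 * programs, then compare the skeleton's call counters against the
 * expected values.
 */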
int ping_and_check(int exp_calls, int exp_alt_calls)
{
        skel->bss->calls = 0;
        skel->bss->alt_calls = 0;
        CHECK_FAIL(system(PING_CMD));
        if (CHECK(skel->bss->calls != exp_calls, "call_cnt",
                  "exp %d, got %d\n", exp_calls, skel->bss->calls))
                return -EINVAL;
        if (CHECK(skel->bss->alt_calls != exp_alt_calls, "alt_call_cnt",
                  "exp %d, got %d\n", exp_alt_calls, skel->bss->alt_calls))
                return -EINVAL;
        return 0;
}

void serial_test_cgroup_link(void)
{
        struct {
                const char *path;
                int fd;
        } cgs[] = {
                { "/cg1" },
                { "/cg1/cg2" },
                { "/cg1/cg2/cg3" },
                { "/cg1/cg2/cg3/cg4" },
        };
        int last_cg = ARRAY_SIZE(cgs) - 1, cg_nr = ARRAY_SIZE(cgs);
        DECLARE_LIBBPF_OPTS(bpf_link_update_opts, link_upd_opts);
        struct bpf_link *links[ARRAY_SIZE(cgs)] = {}, *tmp_link;
        __u32 prog_ids[ARRAY_SIZE(cgs)], prog_cnt = 0, attach_flags, prog_id;
        struct bpf_link_info info;
        int i = 0, err, prog_fd;
        bool detach_legacy = false;

        skel = test_cgroup_link__open_and_load();
        if (CHECK(!skel, "skel_open_load", "failed to open/load skeleton\n"))
                return;
        prog_fd = bpf_program__fd(skel->progs.egress);

        err = setup_cgroup_environment();
        if (CHECK(err, "cg_init", "failed: %d\n", err))
                goto cleanup;

        for (i = 0; i < cg_nr; i++) {
                cgs[i].fd = create_and_get_cgroup(cgs[i].path);
                if (!ASSERT_GE(cgs[i].fd, 0, "cg_create"))
                        goto cleanup;
        }

        err = join_cgroup(cgs[last_cg].path);
        if (CHECK(err, "cg_join", "fail: %d\n", err))
                goto cleanup;

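        /* attach the egress program to every cgroup in the hierarchy
         * via a bpf_link
         */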
        for (i = 0; i < cg_nr; i++) {
                links[i] = bpf_program__attach_cgroup(skel->progs.egress,
                                                      cgs[i].fd);
                if (!ASSERT_OK_PTR(links[i], "cg_attach"))
                        goto cleanup;
        }

        ping_and_check(cg_nr, 0);

        /* query the number of attached progs and attach flags in root cg */
        err = bpf_prog_query(cgs[0].fd, BPF_CGROUP_INET_EGRESS,
                             0, &attach_flags, NULL, &prog_cnt);
        CHECK_FAIL(err);
        CHECK_FAIL(attach_flags != BPF_F_ALLOW_MULTI);
        if (CHECK(prog_cnt != 1, "effect_cnt", "exp %d, got %d\n", 1, prog_cnt))
                goto cleanup;

        /* query the number of effective progs in last cg */
        err = bpf_prog_query(cgs[last_cg].fd, BPF_CGROUP_INET_EGRESS,
                             BPF_F_QUERY_EFFECTIVE, NULL, NULL,
                             &prog_cnt);
        CHECK_FAIL(err);
        if (CHECK(prog_cnt != cg_nr, "effect_cnt", "exp %d, got %d\n",
                  cg_nr, prog_cnt))
                goto cleanup;

        /* query the effective prog IDs in last cg */
        err = bpf_prog_query(cgs[last_cg].fd, BPF_CGROUP_INET_EGRESS,
                             BPF_F_QUERY_EFFECTIVE, NULL, prog_ids,
                             &prog_cnt);
        CHECK_FAIL(err);
        if (CHECK(prog_cnt != cg_nr, "effect_cnt", "exp %d, got %d\n",
                  cg_nr, prog_cnt))
                goto cleanup;
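        /* every cgroup in the chain has the same egress program attached,
         * so all effective prog IDs reported for the last cgroup should match
         */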
        for (i = 1; i < prog_cnt; i++) {
                CHECK(prog_ids[i - 1] != prog_ids[i], "prog_id_check",
                      "idx %d, prev id %d, cur id %d\n",
                      i, prog_ids[i - 1], prog_ids[i]);
        }

        /* detach bottom program and ping again */
        bpf_link__destroy(links[last_cg]);
        links[last_cg] = NULL;

        ping_and_check(cg_nr - 1, 0);

        /* mix in with non link-based multi-attachments */
        err = bpf_prog_attach(prog_fd, cgs[last_cg].fd,
                              BPF_CGROUP_INET_EGRESS, BPF_F_ALLOW_MULTI);
        if (CHECK(err, "cg_attach_legacy", "errno=%d\n", errno))
                goto cleanup;
        detach_legacy = true;

        links[last_cg] = bpf_program__attach_cgroup(skel->progs.egress,
                                                    cgs[last_cg].fd);
        if (!ASSERT_OK_PTR(links[last_cg], "cg_attach"))
                goto cleanup;

        ping_and_check(cg_nr + 1, 0);

        /* detach link */
        bpf_link__destroy(links[last_cg]);
        links[last_cg] = NULL;

        /* detach legacy */
        err = bpf_prog_detach2(prog_fd, cgs[last_cg].fd, BPF_CGROUP_INET_EGRESS);
        if (CHECK(err, "cg_detach_legacy", "errno=%d\n", errno))
                goto cleanup;
        detach_legacy = false;

        /* attach legacy exclusive prog attachment */
        err = bpf_prog_attach(prog_fd, cgs[last_cg].fd,
                              BPF_CGROUP_INET_EGRESS, 0);
        if (CHECK(err, "cg_attach_exclusive", "errno=%d\n", errno))
                goto cleanup;
        detach_legacy = true;

        /* attempt to mix in with multi-attach bpf_link */
        tmp_link = bpf_program__attach_cgroup(skel->progs.egress,
                                              cgs[last_cg].fd);
        if (!ASSERT_ERR_PTR(tmp_link, "cg_attach_fail")) {
                bpf_link__destroy(tmp_link);
                goto cleanup;
        }

        ping_and_check(cg_nr, 0);

        /* detach */
        err = bpf_prog_detach2(prog_fd, cgs[last_cg].fd, BPF_CGROUP_INET_EGRESS);
        if (CHECK(err, "cg_detach_legacy", "errno=%d\n", errno))
                goto cleanup;
        detach_legacy = false;

        ping_and_check(cg_nr - 1, 0);

        /* attach back link-based one */
        links[last_cg] = bpf_program__attach_cgroup(skel->progs.egress,
                                                    cgs[last_cg].fd);
        if (!ASSERT_OK_PTR(links[last_cg], "cg_attach"))
                goto cleanup;

        ping_and_check(cg_nr, 0);

        /* check legacy exclusive prog can't be attached */
        err = bpf_prog_attach(prog_fd, cgs[last_cg].fd,
                              BPF_CGROUP_INET_EGRESS, 0);
        if (CHECK(!err, "cg_attach_exclusive", "unexpected success")) {
                bpf_prog_detach2(prog_fd, cgs[last_cg].fd, BPF_CGROUP_INET_EGRESS);
                goto cleanup;
        }

        /* replace BPF programs inside their links for all but first link */
        for (i = 1; i < cg_nr; i++) {
                err = bpf_link__update_program(links[i], skel->progs.egress_alt);
                if (CHECK(err, "prog_upd", "link #%d\n", i))
                        goto cleanup;
        }

        ping_and_check(1, cg_nr - 1);

        /* Attempt program update with wrong expected BPF program */
        link_upd_opts.old_prog_fd = bpf_program__fd(skel->progs.egress_alt);
        link_upd_opts.flags = BPF_F_REPLACE;
        err = bpf_link_update(bpf_link__fd(links[0]),
                              bpf_program__fd(skel->progs.egress_alt),
                              &link_upd_opts);
        if (CHECK(err == 0 || errno != EPERM, "prog_cmpxchg1",
                  "unexpectedly succeeded, err %d, errno %d\n", err, -errno))
                goto cleanup;

        /* Compare-exchange single link program from egress to egress_alt */
        link_upd_opts.old_prog_fd = bpf_program__fd(skel->progs.egress);
        link_upd_opts.flags = BPF_F_REPLACE;
        err = bpf_link_update(bpf_link__fd(links[0]),
                              bpf_program__fd(skel->progs.egress_alt),
                              &link_upd_opts);
        if (CHECK(err, "prog_cmpxchg2", "errno %d\n", -errno))
                goto cleanup;

        /* ping */
        ping_and_check(0, cg_nr);

        /* close cgroup FDs before detaching links */
        for (i = 0; i < cg_nr; i++) {
                if (cgs[i].fd > 0) {
                        close(cgs[i].fd);
                        cgs[i].fd = -1;
                }
        }

        /* BPF programs should still get called */
        ping_and_check(0, cg_nr);

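        /* before detaching, link_info should still report a valid prog ID
         * and a non-zero cgroup ID
         */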
        prog_id = link_info_prog_id(links[0], &info);
        CHECK(prog_id == 0, "link_info", "failed\n");
        CHECK(info.cgroup.cgroup_id == 0, "cgroup_id", "unexpected %llu\n", info.cgroup.cgroup_id);

        err = bpf_link__detach(links[0]);
        if (CHECK(err, "link_detach", "failed %d\n", err))
                goto cleanup;

        /* cgroup_id should be zero in link_info */
        prog_id = link_info_prog_id(links[0], &info);
        CHECK(prog_id == 0, "link_info", "failed\n");
        CHECK(info.cgroup.cgroup_id != 0, "cgroup_id", "unexpected %llu\n", info.cgroup.cgroup_id);

        /* First BPF program shouldn't be called anymore */
        ping_and_check(0, cg_nr - 1);

        /* leave the cgroups and remove them, don't detach programs */
        cleanup_cgroup_environment();

        /* BPF programs should have been auto-detached */
        ping_and_check(0, 0);

cleanup:
        if (detach_legacy)
                bpf_prog_detach2(prog_fd, cgs[last_cg].fd,
                                 BPF_CGROUP_INET_EGRESS);

        for (i = 0; i < cg_nr; i++) {
                bpf_link__destroy(links[i]);
        }
        test_cgroup_link__destroy(skel);

        for (i = 0; i < cg_nr; i++) {
                if (cgs[i].fd > 0)
                        close(cgs[i].fd);
        }
        cleanup_cgroup_environment();
}
v5.9
// SPDX-License-Identifier: GPL-2.0

#include <test_progs.h>
#include "cgroup_helpers.h"
#include "testing_helpers.h"
#include "test_cgroup_link.skel.h"

static __u32 duration = 0;
#define PING_CMD "ping -q -c1 -w1 127.0.0.1 > /dev/null"

static struct test_cgroup_link *skel = NULL;

int ping_and_check(int exp_calls, int exp_alt_calls)
{
        skel->bss->calls = 0;
        skel->bss->alt_calls = 0;
        CHECK_FAIL(system(PING_CMD));
        if (CHECK(skel->bss->calls != exp_calls, "call_cnt",
                  "exp %d, got %d\n", exp_calls, skel->bss->calls))
                return -EINVAL;
        if (CHECK(skel->bss->alt_calls != exp_alt_calls, "alt_call_cnt",
                  "exp %d, got %d\n", exp_alt_calls, skel->bss->alt_calls))
                return -EINVAL;
        return 0;
}

void test_cgroup_link(void)
{
        struct {
                const char *path;
                int fd;
        } cgs[] = {
                { "/cg1" },
                { "/cg1/cg2" },
                { "/cg1/cg2/cg3" },
                { "/cg1/cg2/cg3/cg4" },
        };
        int last_cg = ARRAY_SIZE(cgs) - 1, cg_nr = ARRAY_SIZE(cgs);
        DECLARE_LIBBPF_OPTS(bpf_link_update_opts, link_upd_opts);
        struct bpf_link *links[ARRAY_SIZE(cgs)] = {}, *tmp_link;
        __u32 prog_ids[ARRAY_SIZE(cgs)], prog_cnt = 0, attach_flags, prog_id;
        struct bpf_link_info info;
        int i = 0, err, prog_fd;
        bool detach_legacy = false;

        skel = test_cgroup_link__open_and_load();
        if (CHECK(!skel, "skel_open_load", "failed to open/load skeleton\n"))
                return;
        prog_fd = bpf_program__fd(skel->progs.egress);

        err = setup_cgroup_environment();
        if (CHECK(err, "cg_init", "failed: %d\n", err))
                goto cleanup;

        for (i = 0; i < cg_nr; i++) {
                cgs[i].fd = create_and_get_cgroup(cgs[i].path);
                if (CHECK(cgs[i].fd < 0, "cg_create", "fail: %d\n", cgs[i].fd))
                        goto cleanup;
        }

        err = join_cgroup(cgs[last_cg].path);
        if (CHECK(err, "cg_join", "fail: %d\n", err))
                goto cleanup;

        for (i = 0; i < cg_nr; i++) {
                links[i] = bpf_program__attach_cgroup(skel->progs.egress,
                                                      cgs[i].fd);
                if (CHECK(IS_ERR(links[i]), "cg_attach", "i: %d, err: %ld\n",
                          i, PTR_ERR(links[i])))
                        goto cleanup;
        }

        ping_and_check(cg_nr, 0);

        /* query the number of effective progs and attach flags in root cg */
        err = bpf_prog_query(cgs[0].fd, BPF_CGROUP_INET_EGRESS,
                             BPF_F_QUERY_EFFECTIVE, &attach_flags, NULL,
                             &prog_cnt);
        CHECK_FAIL(err);
        CHECK_FAIL(attach_flags != BPF_F_ALLOW_MULTI);
        if (CHECK(prog_cnt != 1, "effect_cnt", "exp %d, got %d\n", 1, prog_cnt))
                goto cleanup;

        /* query the number of effective progs in last cg */
        err = bpf_prog_query(cgs[last_cg].fd, BPF_CGROUP_INET_EGRESS,
                             BPF_F_QUERY_EFFECTIVE, NULL, NULL,
                             &prog_cnt);
        CHECK_FAIL(err);
        CHECK_FAIL(attach_flags != BPF_F_ALLOW_MULTI);
        if (CHECK(prog_cnt != cg_nr, "effect_cnt", "exp %d, got %d\n",
                  cg_nr, prog_cnt))
                goto cleanup;

        /* query the effective prog IDs in last cg */
        err = bpf_prog_query(cgs[last_cg].fd, BPF_CGROUP_INET_EGRESS,
                             BPF_F_QUERY_EFFECTIVE, &attach_flags,
                             prog_ids, &prog_cnt);
        CHECK_FAIL(err);
        CHECK_FAIL(attach_flags != BPF_F_ALLOW_MULTI);
        if (CHECK(prog_cnt != cg_nr, "effect_cnt", "exp %d, got %d\n",
                  cg_nr, prog_cnt))
                goto cleanup;
        for (i = 1; i < prog_cnt; i++) {
                CHECK(prog_ids[i - 1] != prog_ids[i], "prog_id_check",
                      "idx %d, prev id %d, cur id %d\n",
                      i, prog_ids[i - 1], prog_ids[i]);
        }

        /* detach bottom program and ping again */
        bpf_link__destroy(links[last_cg]);
        links[last_cg] = NULL;

        ping_and_check(cg_nr - 1, 0);

        /* mix in with non link-based multi-attachments */
        err = bpf_prog_attach(prog_fd, cgs[last_cg].fd,
                              BPF_CGROUP_INET_EGRESS, BPF_F_ALLOW_MULTI);
        if (CHECK(err, "cg_attach_legacy", "errno=%d\n", errno))
                goto cleanup;
        detach_legacy = true;

        links[last_cg] = bpf_program__attach_cgroup(skel->progs.egress,
                                                    cgs[last_cg].fd);
        if (CHECK(IS_ERR(links[last_cg]), "cg_attach", "err: %ld\n",
                  PTR_ERR(links[last_cg])))
                goto cleanup;

        ping_and_check(cg_nr + 1, 0);

        /* detach link */
        bpf_link__destroy(links[last_cg]);
        links[last_cg] = NULL;

        /* detach legacy */
        err = bpf_prog_detach2(prog_fd, cgs[last_cg].fd, BPF_CGROUP_INET_EGRESS);
        if (CHECK(err, "cg_detach_legacy", "errno=%d\n", errno))
                goto cleanup;
        detach_legacy = false;

        /* attach legacy exclusive prog attachment */
        err = bpf_prog_attach(prog_fd, cgs[last_cg].fd,
                              BPF_CGROUP_INET_EGRESS, 0);
        if (CHECK(err, "cg_attach_exclusive", "errno=%d\n", errno))
                goto cleanup;
        detach_legacy = true;

        /* attempt to mix in with multi-attach bpf_link */
        tmp_link = bpf_program__attach_cgroup(skel->progs.egress,
                                              cgs[last_cg].fd);
        if (CHECK(!IS_ERR(tmp_link), "cg_attach_fail", "unexpected success!\n")) {
                bpf_link__destroy(tmp_link);
                goto cleanup;
        }

        ping_and_check(cg_nr, 0);

        /* detach */
        err = bpf_prog_detach2(prog_fd, cgs[last_cg].fd, BPF_CGROUP_INET_EGRESS);
        if (CHECK(err, "cg_detach_legacy", "errno=%d\n", errno))
                goto cleanup;
        detach_legacy = false;

        ping_and_check(cg_nr - 1, 0);

        /* attach back link-based one */
        links[last_cg] = bpf_program__attach_cgroup(skel->progs.egress,
                                                    cgs[last_cg].fd);
        if (CHECK(IS_ERR(links[last_cg]), "cg_attach", "err: %ld\n",
                  PTR_ERR(links[last_cg])))
                goto cleanup;

        ping_and_check(cg_nr, 0);

        /* check legacy exclusive prog can't be attached */
        err = bpf_prog_attach(prog_fd, cgs[last_cg].fd,
                              BPF_CGROUP_INET_EGRESS, 0);
        if (CHECK(!err, "cg_attach_exclusive", "unexpected success")) {
                bpf_prog_detach2(prog_fd, cgs[last_cg].fd, BPF_CGROUP_INET_EGRESS);
                goto cleanup;
        }

        /* replace BPF programs inside their links for all but first link */
        for (i = 1; i < cg_nr; i++) {
                err = bpf_link__update_program(links[i], skel->progs.egress_alt);
                if (CHECK(err, "prog_upd", "link #%d\n", i))
                        goto cleanup;
        }

        ping_and_check(1, cg_nr - 1);

        /* Attempt program update with wrong expected BPF program */
        link_upd_opts.old_prog_fd = bpf_program__fd(skel->progs.egress_alt);
        link_upd_opts.flags = BPF_F_REPLACE;
        err = bpf_link_update(bpf_link__fd(links[0]),
                              bpf_program__fd(skel->progs.egress_alt),
                              &link_upd_opts);
        if (CHECK(err == 0 || errno != EPERM, "prog_cmpxchg1",
                  "unexpectedly succeeded, err %d, errno %d\n", err, -errno))
                goto cleanup;

        /* Compare-exchange single link program from egress to egress_alt */
        link_upd_opts.old_prog_fd = bpf_program__fd(skel->progs.egress);
        link_upd_opts.flags = BPF_F_REPLACE;
        err = bpf_link_update(bpf_link__fd(links[0]),
                              bpf_program__fd(skel->progs.egress_alt),
                              &link_upd_opts);
        if (CHECK(err, "prog_cmpxchg2", "errno %d\n", -errno))
                goto cleanup;

        /* ping */
        ping_and_check(0, cg_nr);

        /* close cgroup FDs before detaching links */
        for (i = 0; i < cg_nr; i++) {
                if (cgs[i].fd > 0) {
                        close(cgs[i].fd);
                        cgs[i].fd = -1;
                }
        }

        /* BPF programs should still get called */
        ping_and_check(0, cg_nr);

        prog_id = link_info_prog_id(links[0], &info);
        CHECK(prog_id == 0, "link_info", "failed\n");
        CHECK(info.cgroup.cgroup_id == 0, "cgroup_id", "unexpected %llu\n", info.cgroup.cgroup_id);

        err = bpf_link__detach(links[0]);
        if (CHECK(err, "link_detach", "failed %d\n", err))
                goto cleanup;

        /* cgroup_id should be zero in link_info */
        prog_id = link_info_prog_id(links[0], &info);
        CHECK(prog_id == 0, "link_info", "failed\n");
        CHECK(info.cgroup.cgroup_id != 0, "cgroup_id", "unexpected %llu\n", info.cgroup.cgroup_id);

        /* First BPF program shouldn't be called anymore */
        ping_and_check(0, cg_nr - 1);

        /* leave the cgroups and remove them, don't detach programs */
        cleanup_cgroup_environment();

        /* BPF programs should have been auto-detached */
        ping_and_check(0, 0);

cleanup:
        if (detach_legacy)
                bpf_prog_detach2(prog_fd, cgs[last_cg].fd,
                                 BPF_CGROUP_INET_EGRESS);

        for (i = 0; i < cg_nr; i++) {
                if (!IS_ERR(links[i]))
                        bpf_link__destroy(links[i]);
        }
        test_cgroup_link__destroy(skel);

        for (i = 0; i < cg_nr; i++) {
                if (cgs[i].fd > 0)
                        close(cgs[i].fd);
        }
        cleanup_cgroup_environment();
}