Linux Audio

Check our new training course

Loading...
v6.13.7
  1// SPDX-License-Identifier: GPL-2.0
  2#include <test_progs.h>
  3#include <network_helpers.h>
  4#include <net/if.h>
  5#include "test_xdp.skel.h"
  6#include "test_xdp_bpf2bpf.skel.h"
  7
/* Metadata header that the trace BPF program prepends to each packet
 * sample it pushes into the perf ring buffer via bpf_xdp_output().
 * Must match the layout used by progs/test_xdp_bpf2bpf.c.
 */
struct meta {
	int ifindex;	/* ifindex of the receiving device; compared against
			 * if_nametoindex("lo") in on_sample() */
	int pkt_len;	/* length in bytes of the sampled packet that follows
			 * this header in the perf buffer record */
};
 12
/* State shared between the test driver and the perf buffer callback. */
struct test_ctx_s {
	bool passed;	/* set true by on_sample() once a valid sample arrives */
	int pkt_size;	/* size of the packet currently under test; checked
			 * against meta->pkt_len in on_sample() */
};

/* Handed to perf_buffer__new() as the callback context. */
struct test_ctx_s test_ctx;
 19
 20static void on_sample(void *ctx, int cpu, void *data, __u32 size)
 21{
 
 22	struct meta *meta = (struct meta *)data;
 23	struct ipv4_packet *trace_pkt_v4 = data + sizeof(*meta);
 24	unsigned char *raw_pkt = data + sizeof(*meta);
 25	struct test_ctx_s *tst_ctx = ctx;
 26
 27	ASSERT_GE(size, sizeof(pkt_v4) + sizeof(*meta), "check_size");
 28	ASSERT_EQ(meta->ifindex, if_nametoindex("lo"), "check_meta_ifindex");
 29	ASSERT_EQ(meta->pkt_len, tst_ctx->pkt_size, "check_meta_pkt_len");
 30	ASSERT_EQ(memcmp(trace_pkt_v4, &pkt_v4, sizeof(pkt_v4)), 0,
 31		  "check_packet_content");
 32
 33	if (meta->pkt_len > sizeof(pkt_v4)) {
 34		for (int i = 0; i < meta->pkt_len - sizeof(pkt_v4); i++)
 35			ASSERT_EQ(raw_pkt[i + sizeof(pkt_v4)], (unsigned char)i,
 36				  "check_packet_content");
 37	}
 38
 39	tst_ctx->passed = true;
 40}
 41
/* Size of the test_run input/output buffers; must be >= the largest
 * entry in pkt_sizes[] (8200) — enforced by ASSERT_LE in
 * run_xdp_bpf2bpf_pkt_size().
 */
#define BUF_SZ	9000
 43
 44static void run_xdp_bpf2bpf_pkt_size(int pkt_fd, struct perf_buffer *pb,
 45				     struct test_xdp_bpf2bpf *ftrace_skel,
 46				     int pkt_size)
 47{
 48	__u8 *buf, *buf_in;
 49	int err;
 50	LIBBPF_OPTS(bpf_test_run_opts, topts);
 51
 52	if (!ASSERT_LE(pkt_size, BUF_SZ, "pkt_size") ||
 53	    !ASSERT_GE(pkt_size, sizeof(pkt_v4), "pkt_size"))
 54		return;
 55
 56	buf_in = malloc(BUF_SZ);
 57	if (!ASSERT_OK_PTR(buf_in, "buf_in malloc()"))
 58		return;
 59
 60	buf = malloc(BUF_SZ);
 61	if (!ASSERT_OK_PTR(buf, "buf malloc()")) {
 62		free(buf_in);
 63		return;
 64	}
 65
 66	test_ctx.passed = false;
 67	test_ctx.pkt_size = pkt_size;
 68
 69	memcpy(buf_in, &pkt_v4, sizeof(pkt_v4));
 70	if (pkt_size > sizeof(pkt_v4)) {
 71		for (int i = 0; i < (pkt_size - sizeof(pkt_v4)); i++)
 72			buf_in[i + sizeof(pkt_v4)] = i;
 73	}
 74
 75	/* Run test program */
 76	topts.data_in = buf_in;
 77	topts.data_size_in = pkt_size;
 78	topts.data_out = buf;
 79	topts.data_size_out = BUF_SZ;
 80
 81	err = bpf_prog_test_run_opts(pkt_fd, &topts);
 82
 83	ASSERT_OK(err, "ipv4");
 84	ASSERT_EQ(topts.retval, XDP_PASS, "ipv4 retval");
 85	ASSERT_EQ(topts.data_size_out, pkt_size, "ipv4 size");
 86
 87	/* Make sure bpf_xdp_output() was triggered and it sent the expected
 88	 * data to the perf ring buffer.
 89	 */
 90	err = perf_buffer__poll(pb, 100);
 91
 92	ASSERT_GE(err, 0, "perf_buffer__poll");
 93	ASSERT_TRUE(test_ctx.passed, "test passed");
 94	/* Verify test results */
 95	ASSERT_EQ(ftrace_skel->bss->test_result_fentry, if_nametoindex("lo"),
 96		  "fentry result");
 97	ASSERT_EQ(ftrace_skel->bss->test_result_fexit, XDP_PASS, "fexit result");
 98
 99	free(buf);
100	free(buf_in);
101}
102
103void test_xdp_bpf2bpf(void)
104{
 
 
105	int err, pkt_fd, map_fd;
106	int pkt_sizes[] = {sizeof(pkt_v4), 1024, 4100, 8200};
107	struct iptnl_info value4 = {.family = AF_INET6};
 
108	struct test_xdp *pkt_skel = NULL;
109	struct test_xdp_bpf2bpf *ftrace_skel = NULL;
110	struct vip key4 = {.protocol = 6, .family = AF_INET};
111	struct bpf_program *prog;
112	struct perf_buffer *pb = NULL;
 
113
114	/* Load XDP program to introspect */
115	pkt_skel = test_xdp__open_and_load();
116	if (!ASSERT_OK_PTR(pkt_skel, "test_xdp__open_and_load"))
117		return;
118
119	pkt_fd = bpf_program__fd(pkt_skel->progs._xdp_tx_iptunnel);
120
121	map_fd = bpf_map__fd(pkt_skel->maps.vip2tnl);
122	bpf_map_update_elem(map_fd, &key4, &value4, 0);
123
124	/* Load trace program */
125	ftrace_skel = test_xdp_bpf2bpf__open();
126	if (!ASSERT_OK_PTR(ftrace_skel, "test_xdp_bpf2bpf__open"))
127		goto out;
128
129	/* Demonstrate the bpf_program__set_attach_target() API rather than
130	 * the load with options, i.e. opts.attach_prog_fd.
131	 */
132	prog = ftrace_skel->progs.trace_on_entry;
133	bpf_program__set_expected_attach_type(prog, BPF_TRACE_FENTRY);
134	bpf_program__set_attach_target(prog, pkt_fd, "_xdp_tx_iptunnel");
135
136	prog = ftrace_skel->progs.trace_on_exit;
137	bpf_program__set_expected_attach_type(prog, BPF_TRACE_FEXIT);
138	bpf_program__set_attach_target(prog, pkt_fd, "_xdp_tx_iptunnel");
139
140	err = test_xdp_bpf2bpf__load(ftrace_skel);
141	if (!ASSERT_OK(err, "test_xdp_bpf2bpf__load"))
142		goto out;
143
144	err = test_xdp_bpf2bpf__attach(ftrace_skel);
145	if (!ASSERT_OK(err, "test_xdp_bpf2bpf__attach"))
146		goto out;
147
148	/* Set up perf buffer */
149	pb = perf_buffer__new(bpf_map__fd(ftrace_skel->maps.perf_buf_map), 8,
150			      on_sample, NULL, &test_ctx, NULL);
151	if (!ASSERT_OK_PTR(pb, "perf_buf__new"))
 
 
152		goto out;
153
154	for (int i = 0; i < ARRAY_SIZE(pkt_sizes); i++)
155		run_xdp_bpf2bpf_pkt_size(pkt_fd, pb, ftrace_skel,
156					 pkt_sizes[i]);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
157out:
158	perf_buffer__free(pb);
 
159	test_xdp__destroy(pkt_skel);
160	test_xdp_bpf2bpf__destroy(ftrace_skel);
161}
v5.9 — earlier version of the same test file, shown for comparison:
  1// SPDX-License-Identifier: GPL-2.0
  2#include <test_progs.h>
  3#include <network_helpers.h>
  4#include <net/if.h>
  5#include "test_xdp.skel.h"
  6#include "test_xdp_bpf2bpf.skel.h"
  7
/* Metadata header prepended to each packet sample the trace program
 * sends to the perf ring buffer; layout must match the BPF side.
 */
struct meta {
	int ifindex;	/* ifindex of the receiving device; compared against
			 * if_nametoindex("lo") in on_sample() */
	int pkt_len;	/* length of the sampled packet following this header */
};
 12
 
 
 
 
 
 
 
 13static void on_sample(void *ctx, int cpu, void *data, __u32 size)
 14{
 15	int duration = 0;
 16	struct meta *meta = (struct meta *)data;
 17	struct ipv4_packet *trace_pkt_v4 = data + sizeof(*meta);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 18
 19	if (CHECK(size < sizeof(pkt_v4) + sizeof(*meta),
 20		  "check_size", "size %u < %zu\n",
 21		  size, sizeof(pkt_v4) + sizeof(*meta)))
 
 
 
 
 
 
 
 
 
 22		return;
 23
 24	if (CHECK(meta->ifindex != if_nametoindex("lo"), "check_meta_ifindex",
 25		  "meta->ifindex = %d\n", meta->ifindex))
 26		return;
 27
 28	if (CHECK(meta->pkt_len != sizeof(pkt_v4), "check_meta_pkt_len",
 29		  "meta->pkt_len = %zd\n", sizeof(pkt_v4)))
 
 30		return;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 31
 32	if (CHECK(memcmp(trace_pkt_v4, &pkt_v4, sizeof(pkt_v4)),
 33		  "check_packet_content", "content not the same\n"))
 34		return;
 
 
 
 35
 36	*(bool *)ctx = true;
 
 37}
 38
/* v5.9 variant: load the XDP iptunnel program, attach fentry/fexit
 * trace programs to _xdp_tx_iptunnel, run it once on pkt_v4 via
 * bpf_prog_test_run(), and verify both the XDP result and the trace
 * data delivered through the perf ring buffer.
 */
void test_xdp_bpf2bpf(void)
{
	__u32 duration = 0, retval, size;	/* duration needed by CHECK() */
	char buf[128];				/* data_out for bpf_prog_test_run */
	int err, pkt_fd, map_fd;
	bool passed = false;			/* set by on_sample() via pb ctx */
	/* Points at the outer IP header of the encapsulated output packet */
	struct iphdr *iph = (void *)buf + sizeof(struct ethhdr);
	struct iptnl_info value4 = {.family = AF_INET};
	struct test_xdp *pkt_skel = NULL;
	struct test_xdp_bpf2bpf *ftrace_skel = NULL;
	struct vip key4 = {.protocol = 6, .family = AF_INET};
	struct bpf_program *prog;
	struct perf_buffer *pb = NULL;
	struct perf_buffer_opts pb_opts = {};	/* legacy libbpf perf buffer API */

	/* Load XDP program to introspect */
	pkt_skel = test_xdp__open_and_load();
	if (CHECK(!pkt_skel, "pkt_skel_load", "test_xdp skeleton failed\n"))
		return;

	pkt_fd = bpf_program__fd(pkt_skel->progs._xdp_tx_iptunnel);

	/* Seed the tunnel map so the program encapsulates the test flow */
	map_fd = bpf_map__fd(pkt_skel->maps.vip2tnl);
	bpf_map_update_elem(map_fd, &key4, &value4, 0);

	/* Load trace program */
	ftrace_skel = test_xdp_bpf2bpf__open();
	if (CHECK(!ftrace_skel, "__open", "ftrace skeleton failed\n"))
		goto out;

	/* Demonstrate the bpf_program__set_attach_target() API rather than
	 * the load with options, i.e. opts.attach_prog_fd.
	 */
	prog = ftrace_skel->progs.trace_on_entry;
	bpf_program__set_expected_attach_type(prog, BPF_TRACE_FENTRY);
	bpf_program__set_attach_target(prog, pkt_fd, "_xdp_tx_iptunnel");

	prog = ftrace_skel->progs.trace_on_exit;
	bpf_program__set_expected_attach_type(prog, BPF_TRACE_FEXIT);
	bpf_program__set_attach_target(prog, pkt_fd, "_xdp_tx_iptunnel");

	err = test_xdp_bpf2bpf__load(ftrace_skel);
	if (CHECK(err, "__load", "ftrace skeleton failed\n"))
		goto out;

	err = test_xdp_bpf2bpf__attach(ftrace_skel);
	if (CHECK(err, "ftrace_attach", "ftrace attach failed: %d\n", err))
		goto out;

	/* Set up perf buffer; on_sample() reports into 'passed' */
	pb_opts.sample_cb = on_sample;
	pb_opts.ctx = &passed;
	pb = perf_buffer__new(bpf_map__fd(ftrace_skel->maps.perf_buf_map),
			      1, &pb_opts);
	/* Legacy libbpf returns an ERR_PTR-encoded pointer on failure */
	if (CHECK(IS_ERR(pb), "perf_buf__new", "err %ld\n", PTR_ERR(pb)))
		goto out;

	/* Run test program */
	err = bpf_prog_test_run(pkt_fd, 1, &pkt_v4, sizeof(pkt_v4),
				buf, &size, &retval, &duration);

	/* 74 = sizeof(pkt_v4) plus the outer header added by the encap;
	 * NOTE(review): consistent with the IPPROTO_IPIP check — confirm
	 * against the XDP program before changing.
	 */
	if (CHECK(err || retval != XDP_TX || size != 74 ||
		  iph->protocol != IPPROTO_IPIP, "ipv4",
		  "err %d errno %d retval %d size %d\n",
		  err, errno, retval, size))
		goto out;

	/* Make sure bpf_xdp_output() was triggered and it sent the expected
	 * data to the perf ring buffer.
	 */
	err = perf_buffer__poll(pb, 100);
	if (CHECK(err < 0, "perf_buffer__poll", "err %d\n", err))
		goto out;

	CHECK_FAIL(!passed);

	/* Verify test results */
	if (CHECK(ftrace_skel->bss->test_result_fentry != if_nametoindex("lo"),
		  "result", "fentry failed err %llu\n",
		  ftrace_skel->bss->test_result_fentry))
		goto out;

	CHECK(ftrace_skel->bss->test_result_fexit != XDP_TX, "result",
	      "fexit failed err %llu\n", ftrace_skel->bss->test_result_fexit);

out:
	/* pb may be an ERR_PTR here, hence the guard before freeing */
	if (pb)
		perf_buffer__free(pb);
	test_xdp__destroy(pkt_skel);
	test_xdp_bpf2bpf__destroy(ftrace_skel);
}