// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <test_progs.h>
#include <network_helpers.h>
#include "for_each_hash_map_elem.skel.h"
#include "for_each_array_map_elem.skel.h"
#include "for_each_map_elem_write_key.skel.h"
#include "for_each_multi_maps.skel.h"

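/* referenced implicitly by the CHECK() macro from test_progs.h */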
static unsigned int duration;

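/*
 * Populate a hashmap and a per-CPU map from user space, run the BPF
 * program once, and verify the values its bpf_for_each_map_elem()
 * callbacks wrote back to .bss.
 */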
static void test_hash_map(void)
{
	int i, err, max_entries;
	struct for_each_hash_map_elem *skel;
	__u64 *percpu_valbuf = NULL;
	size_t percpu_val_sz;
	__u32 key, num_cpus;
	__u64 val;
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		    .data_in = &pkt_v4,
		    .data_size_in = sizeof(pkt_v4),
		    .repeat = 1,
	);

	skel = for_each_hash_map_elem__open_and_load();
	if (!ASSERT_OK_PTR(skel, "for_each_hash_map_elem__open_and_load"))
		return;

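	/* fill the hashmap: key i maps to value i + 1 */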
	max_entries = bpf_map__max_entries(skel->maps.hashmap);
	for (i = 0; i < max_entries; i++) {
		key = i;
		val = i + 1;
		err = bpf_map__update_elem(skel->maps.hashmap, &key, sizeof(key),
					   &val, sizeof(val), BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	num_cpus = bpf_num_possible_cpus();
	percpu_val_sz = sizeof(__u64) * num_cpus;
	percpu_valbuf = malloc(percpu_val_sz);
	if (!ASSERT_OK_PTR(percpu_valbuf, "percpu_valbuf"))
		goto out;

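	/* seed the per-CPU map at key 1: the slot for CPU i holds i + 1 */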
	key = 1;
	for (i = 0; i < num_cpus; i++)
		percpu_valbuf[i] = i + 1;
	err = bpf_map__update_elem(skel->maps.percpu_map, &key, sizeof(key),
				   percpu_valbuf, percpu_val_sz, BPF_ANY);
	if (!ASSERT_OK(err, "percpu_map_update"))
		goto out;

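	/* run the program once; its callbacks iterate both maps */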
	err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_pkt_access), &topts);
	duration = topts.duration;
	if (CHECK(err || topts.retval, "ipv4", "err %d errno %d retval %d\n",
		  err, errno, topts.retval))
		goto out;

	ASSERT_EQ(skel->bss->hashmap_output, 4, "hashmap_output");
	ASSERT_EQ(skel->bss->hashmap_elems, max_entries, "hashmap_elems");

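	/* the BPF-side callback deletes hashmap entries while iterating,
	 * so looking the key up again is expected to fail
	 */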
	key = 1;
	err = bpf_map__lookup_elem(skel->maps.hashmap, &key, sizeof(key), &val, sizeof(val), 0);
	ASSERT_ERR(err, "hashmap_lookup");

	ASSERT_EQ(skel->bss->percpu_called, 1, "percpu_called");
	ASSERT_LT(skel->bss->cpu, num_cpus, "num_cpus");
	ASSERT_EQ(skel->bss->percpu_map_elems, 1, "percpu_map_elems");
	ASSERT_EQ(skel->bss->percpu_key, 1, "percpu_key");
	ASSERT_EQ(skel->bss->percpu_val, skel->bss->cpu + 1, "percpu_val");
	ASSERT_EQ(skel->bss->percpu_output, 100, "percpu_output");
out:
	free(percpu_valbuf);
	for_each_hash_map_elem__destroy(skel);
}

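/*
 * Same pattern for an array map plus a per-CPU map: the BPF-side callback
 * stops the iteration before the last element, so the expected total
 * deliberately leaves it out.
 */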
static void test_array_map(void)
{
	__u32 key, num_cpus, max_entries;
	int i, err;
	struct for_each_array_map_elem *skel;
	__u64 *percpu_valbuf = NULL;
	size_t percpu_val_sz;
	__u64 val, expected_total;
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		    .data_in = &pkt_v4,
		    .data_size_in = sizeof(pkt_v4),
		    .repeat = 1,
	);

	skel = for_each_array_map_elem__open_and_load();
	if (!ASSERT_OK_PTR(skel, "for_each_array_map_elem__open_and_load"))
		return;

	expected_total = 0;
	max_entries = bpf_map__max_entries(skel->maps.arraymap);
	for (i = 0; i < max_entries; i++) {
		key = i;
		val = i + 1;
		/* the BPF callback stops iterating early, so the last
		 * element is left out of the expected total
		 */
		if (i != max_entries - 1)
			expected_total += val;
		err = bpf_map__update_elem(skel->maps.arraymap, &key, sizeof(key),
					   &val, sizeof(val), BPF_ANY);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	num_cpus = bpf_num_possible_cpus();
	percpu_val_sz = sizeof(__u64) * num_cpus;
	percpu_valbuf = malloc(percpu_val_sz);
	if (!ASSERT_OK_PTR(percpu_valbuf, "percpu_valbuf"))
		goto out;

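	/* seed the per-CPU map at key 0: the slot for CPU i holds i + 1 */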
	key = 0;
	for (i = 0; i < num_cpus; i++)
		percpu_valbuf[i] = i + 1;
	err = bpf_map__update_elem(skel->maps.percpu_map, &key, sizeof(key),
				   percpu_valbuf, percpu_val_sz, BPF_ANY);
	if (!ASSERT_OK(err, "percpu_map_update"))
		goto out;

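	/* run the program; the callbacks sum the array elements and record
	 * the per-CPU value seen on the running CPU
	 */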
	err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_pkt_access), &topts);
	duration = topts.duration;
	if (CHECK(err || topts.retval, "ipv4", "err %d errno %d retval %d\n",
		  err, errno, topts.retval))
		goto out;

	ASSERT_EQ(skel->bss->arraymap_output, expected_total, "array_output");
	ASSERT_EQ(skel->bss->cpu + 1, skel->bss->percpu_val, "percpu_val");

out:
	free(percpu_valbuf);
	for_each_array_map_elem__destroy(skel);
}

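/*
 * Negative test: the callback passed to bpf_for_each_map_elem() must not
 * write through its key pointer, so loading this program is expected to
 * fail and a NULL skeleton is the pass case.
 */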
static void test_write_map_key(void)
{
	struct for_each_map_elem_write_key *skel;

	skel = for_each_map_elem_write_key__open_and_load();
	if (!ASSERT_ERR_PTR(skel, "for_each_map_elem_write_key__open_and_load"))
		for_each_map_elem_write_key__destroy(skel);
}

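/*
 * One program, two maps: the use_array flag in .bss selects whether the
 * BPF side iterates the array map or the hashmap, and each run's output
 * must match the corresponding total computed here.
 */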
static void test_multi_maps(void)
{
	struct for_each_multi_maps *skel;
	__u64 val, array_total, hash_total;
	__u32 key, max_entries;
	int i, err;

	LIBBPF_OPTS(bpf_test_run_opts, topts,
		    .data_in = &pkt_v4,
		    .data_size_in = sizeof(pkt_v4),
		    .repeat = 1,
	);

	skel = for_each_multi_maps__open_and_load();
	if (!ASSERT_OK_PTR(skel, "for_each_multi_maps__open_and_load"))
		return;

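	/* populate the array map and track the sum of its values */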
	array_total = 0;
	max_entries = bpf_map__max_entries(skel->maps.arraymap);
	for (i = 0; i < max_entries; i++) {
		key = i;
		val = i + 1;
		array_total += val;
		err = bpf_map__update_elem(skel->maps.arraymap, &key, sizeof(key),
					   &val, sizeof(val), BPF_ANY);
		if (!ASSERT_OK(err, "array_map_update"))
			goto out;
	}

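	/* hashmap keys are offset by 100 so they differ from array indices */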
	hash_total = 0;
	max_entries = bpf_map__max_entries(skel->maps.hashmap);
	for (i = 0; i < max_entries; i++) {
		key = i + 100;
		val = i + 1;
		hash_total += val;
		err = bpf_map__update_elem(skel->maps.hashmap, &key, sizeof(key),
					   &val, sizeof(val), BPF_ANY);
		if (!ASSERT_OK(err, "hash_map_update"))
			goto out;
	}

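	/* first run: iterate the array map */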
	skel->bss->data_output = 0;
	skel->bss->use_array = 1;
	err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_pkt_access), &topts);
	ASSERT_OK(err, "bpf_prog_test_run_opts");
	ASSERT_OK(topts.retval, "retval");
	ASSERT_EQ(skel->bss->data_output, array_total, "array output");

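	/* second run: same program, now iterating the hashmap */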
	skel->bss->data_output = 0;
	skel->bss->use_array = 0;
	err = bpf_prog_test_run_opts(bpf_program__fd(skel->progs.test_pkt_access), &topts);
	ASSERT_OK(err, "bpf_prog_test_run_opts");
	ASSERT_OK(topts.retval, "retval");
	ASSERT_EQ(skel->bss->data_output, hash_total, "hash output");

out:
	for_each_multi_maps__destroy(skel);
}

void test_for_each(void)
{
	if (test__start_subtest("hash_map"))
		test_hash_map();
	if (test__start_subtest("array_map"))
		test_array_map();
	if (test__start_subtest("write_map_key"))
		test_write_map_key();
	if (test__start_subtest("multi_maps"))
		test_multi_maps();
}