/* Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
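
/*
 * Kernel-side BPF programs for the map performance/stress test sample.
 * Each program below attaches as a kprobe on a syscall and hammers one
 * map type with update/lookup/delete operations; the companion user-space
 * loader (map_perf_test_user.c in samples/bpf) attaches the programs and
 * triggers the syscalls to measure map throughput.
 */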
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/version.h>
#include <uapi/linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_core_read.h>
#include "trace_common.h"

#define MAX_ENTRIES 1000
#define MAX_NR_CPUS 1024

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, MAX_ENTRIES);
} hash_map SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, 10000);
} lru_hash_map SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, 10000);
	__uint(map_flags, BPF_F_NO_COMMON_LRU);
} nocommon_lru_hash_map SEC(".maps");

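/*
 * Map-in-map setup: inner_lru_hash_map serves both as the value type
 * declaration for array_of_lru_hashs and as its statically initialized
 * first element.  BPF_F_NUMA_NODE with numa_node 0 pins the inner map's
 * memory to NUMA node 0; user space is expected to create and install
 * additional inner LRU maps for the remaining array slots.
 */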
struct inner_lru {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, MAX_ENTRIES);
	__uint(map_flags, BPF_F_NUMA_NODE);
	__uint(numa_node, 0);
} inner_lru_hash_map SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
	__uint(max_entries, MAX_NR_CPUS);
	__uint(key_size, sizeof(u32));
	__array(values, struct inner_lru); /* use inner_lru as inner map */
} array_of_lru_hashs SEC(".maps") = {
	/* statically initialize the first element */
	.values = { &inner_lru_hash_map },
};

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(long));
	__uint(max_entries, MAX_ENTRIES);
} percpu_hash_map SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, MAX_ENTRIES);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} hash_map_alloc SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(long));
	__uint(max_entries, MAX_ENTRIES);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} percpu_hash_map_alloc SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_LPM_TRIE);
	__uint(key_size, 8);
	__uint(value_size, sizeof(long));
	__uint(max_entries, 10000);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} lpm_trie_map_alloc SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, MAX_ENTRIES);
} array_map SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__type(key, u32);
	__type(value, long);
	__uint(max_entries, MAX_ENTRIES);
} lru_hash_lookup_map SEC(".maps");

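/*
 * Each stress_* program below runs from a kprobe on a lightweight syscall
 * (getuid, geteuid, ...), uses the lower 32 bits of
 * bpf_get_current_pid_tgid() (the thread ID) as the key, and performs an
 * update, a lookup and a delete so that one syscall exercises the full
 * element lifecycle of the map under test.
 */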
SEC("kprobe/" SYSCALL(sys_getuid))
int stress_hmap(struct pt_regs *ctx)
{
	u32 key = bpf_get_current_pid_tgid();
	long init_val = 1;
	long *value;

	bpf_map_update_elem(&hash_map, &key, &init_val, BPF_ANY);
	value = bpf_map_lookup_elem(&hash_map, &key);
	if (value)
		bpf_map_delete_elem(&hash_map, &key);

	return 0;
}

SEC("kprobe/" SYSCALL(sys_geteuid))
int stress_percpu_hmap(struct pt_regs *ctx)
{
	u32 key = bpf_get_current_pid_tgid();
	long init_val = 1;
	long *value;

	bpf_map_update_elem(&percpu_hash_map, &key, &init_val, BPF_ANY);
	value = bpf_map_lookup_elem(&percpu_hash_map, &key);
	if (value)
		bpf_map_delete_elem(&percpu_hash_map, &key);
	return 0;
}

SEC("kprobe/" SYSCALL(sys_getgid))
int stress_hmap_alloc(struct pt_regs *ctx)
{
	u32 key = bpf_get_current_pid_tgid();
	long init_val = 1;
	long *value;

	bpf_map_update_elem(&hash_map_alloc, &key, &init_val, BPF_ANY);
	value = bpf_map_lookup_elem(&hash_map_alloc, &key);
	if (value)
		bpf_map_delete_elem(&hash_map_alloc, &key);
	return 0;
}

SEC("kprobe/" SYSCALL(sys_getegid))
int stress_percpu_hmap_alloc(struct pt_regs *ctx)
{
	u32 key = bpf_get_current_pid_tgid();
	long init_val = 1;
	long *value;

	bpf_map_update_elem(&percpu_hash_map_alloc, &key, &init_val, BPF_ANY);
	value = bpf_map_lookup_elem(&percpu_hash_map_alloc, &key);
	if (value)
		bpf_map_delete_elem(&percpu_hash_map_alloc, &key);
	return 0;
}

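/*
 * stress_lru_hmap_alloc is driven through connect(): user space encodes the
 * test parameters in the IPv6 destination address (magic words 0xdead and
 * 0xbeef, a test-case number and an optional lookup key).  The test case
 * selects which LRU map is updated: the common-LRU map, the no-common-LRU
 * map, a per-CPU inner map taken from array_of_lru_hashs, or (case 3) a run
 * of lookups against lru_hash_lookup_map.
 */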
SEC("kprobe/" SYSCALL(sys_connect))
int stress_lru_hmap_alloc(struct pt_regs *ctx)
{
	struct pt_regs *real_regs = (struct pt_regs *)PT_REGS_PARM1_CORE(ctx);
	char fmt[] = "Failed at stress_lru_hmap_alloc. ret:%d\n";
	union {
		u16 dst6[8];
		struct {
			u16 magic0;
			u16 magic1;
			u16 tcase;
			u16 unused16;
			u32 unused32;
			u32 key;
		};
	} test_params;
	struct sockaddr_in6 *in6;
	u16 test_case;
	int addrlen, ret;
	long val = 1;
	u32 key = 0;

	in6 = (struct sockaddr_in6 *)PT_REGS_PARM2_CORE(real_regs);
	addrlen = (int)PT_REGS_PARM3_CORE(real_regs);

	if (addrlen != sizeof(*in6))
		return 0;

	ret = bpf_probe_read_user(test_params.dst6, sizeof(test_params.dst6),
				  &in6->sin6_addr);
	if (ret)
		goto done;

	if (test_params.magic0 != 0xdead ||
	    test_params.magic1 != 0xbeef)
		return 0;

	test_case = test_params.tcase;
	if (test_case != 3)
		key = bpf_get_prandom_u32();

	if (test_case == 0) {
		ret = bpf_map_update_elem(&lru_hash_map, &key, &val, BPF_ANY);
	} else if (test_case == 1) {
		ret = bpf_map_update_elem(&nocommon_lru_hash_map, &key, &val,
					  BPF_ANY);
	} else if (test_case == 2) {
		void *nolocal_lru_map;
		int cpu = bpf_get_smp_processor_id();

		nolocal_lru_map = bpf_map_lookup_elem(&array_of_lru_hashs,
						      &cpu);
		if (!nolocal_lru_map) {
			ret = -ENOENT;
			goto done;
		}

		ret = bpf_map_update_elem(nolocal_lru_map, &key, &val,
					  BPF_ANY);
	} else if (test_case == 3) {
		u32 i;

		key = test_params.key;

#pragma clang loop unroll(full)
		for (i = 0; i < 32; i++) {
			bpf_map_lookup_elem(&lru_hash_lookup_map, &key);
			key++;
		}
	} else {
		ret = -EINVAL;
	}

done:
	if (ret)
		bpf_trace_printk(fmt, sizeof(fmt), ret);

	return 0;
}

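/*
 * The LPM trie key below follows the struct bpf_lpm_trie_key layout: the
 * first 4 bytes hold the prefix length (32, i.e. a full /32 match) and the
 * next 4 bytes hold the address being looked up, here 192.168.0.1.
 */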
SEC("kprobe/" SYSCALL(sys_gettid))
int stress_lpm_trie_map_alloc(struct pt_regs *ctx)
{
	union {
		u32 b32[2];
		u8 b8[8];
	} key;
	unsigned int i;

	key.b32[0] = 32;
	key.b8[4] = 192;
	key.b8[5] = 168;
	key.b8[6] = 0;
	key.b8[7] = 1;

#pragma clang loop unroll(full)
	for (i = 0; i < 32; ++i)
		bpf_map_lookup_elem(&lpm_trie_map_alloc, &key);

	return 0;
}

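/*
 * Pure lookup benchmarks: the loops are fully unrolled so a single syscall
 * issues 64 back-to-back lookups against the hash map and the array map
 * respectively, isolating lookup cost from update/delete overhead.
 */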
SEC("kprobe/" SYSCALL(sys_getpgid))
int stress_hash_map_lookup(struct pt_regs *ctx)
{
	u32 key = 1, i;
	long *value;

#pragma clang loop unroll(full)
	for (i = 0; i < 64; ++i)
		value = bpf_map_lookup_elem(&hash_map, &key);

	return 0;
}

SEC("kprobe/" SYSCALL(sys_getppid))
int stress_array_map_lookup(struct pt_regs *ctx)
{
	u32 key = 1, i;
	long *value;

#pragma clang loop unroll(full)
	for (i = 0; i < 64; ++i)
		value = bpf_map_lookup_elem(&array_map, &key);

	return 0;
}

char _license[] SEC("license") = "GPL";
u32 _version SEC("version") = LINUX_VERSION_CODE;