// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2021 Facebook */
#include <linux/bpf.h>
#include <time.h>
#include <stdbool.h>
#include <errno.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

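/* BPF side of the bpf_timer selftests: each SEC("fentry/...") program below
 * arms timers in a different map type and the callbacks record results in
 * the global counters; a user-space runner (presumably the matching
 * prog_tests/timer.c) triggers the fentry hooks and checks err/ok afterwards.
 */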
char _license[] SEC("license") = "GPL";
struct hmap_elem {
	int counter;
	struct bpf_timer timer;
	struct bpf_spin_lock lock; /* unused */
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1000);
	__type(key, int);
	__type(value, struct hmap_elem);
} hmap SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__uint(max_entries, 1000);
	__type(key, int);
	__type(value, struct hmap_elem);
} hmap_malloc SEC(".maps");

struct elem {
	struct bpf_timer t;
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 2);
	__type(key, int);
	__type(value, struct elem);
} array SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__uint(max_entries, 4);
	__type(key, int);
	__type(value, struct elem);
} lru SEC(".maps");

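/* four single-entry arrays sharing one anonymous value type: one for the
 * absolute-timer test, two for the CPU-pinned timer tests and one for the
 * init/start/cancel race test below
 */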
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct elem);
} abs_timer SEC(".maps"), soft_timer_pinned SEC(".maps"), abs_timer_pinned SEC(".maps"),
	race_array SEC(".maps");

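/* results read back by user space: every failed check below sets a distinct
 * bit in 'err' so the failing assertion can be identified, and 'ok' collects
 * a bit per completed test path; the *_check counters are adjusted from the
 * timer callbacks
 */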
__u64 bss_data;
__u64 abs_data;
__u64 err;
__u64 ok;
__u64 callback_check = 52;
__u64 callback2_check = 52;
__u64 pinned_callback_check;
__s32 pinned_cpu;

#define ARRAY 1
#define HTAB 2
#define HTAB_MALLOC 3
#define LRU 4

/* callback for array and lru timers */
static int timer_cb1(void *map, int *key, struct bpf_timer *timer)
{
	/* increment the bss variable twice:
	 * once via the array timer callback and once via the lru timer callback
	 */
	bss_data += 5;

	/* *key == ARRAY (1) - the callback was called for the array timer.
	 * *key == LRU (4) - the callback was called for the lru timer.
	 */
	if (*key == ARRAY) {
		struct bpf_timer *lru_timer;
		int lru_key = LRU;

		/* rearm the array timer to be called again in ~35 seconds */
		if (bpf_timer_start(timer, 1ull << 35, 0) != 0)
			err |= 1;

		lru_timer = bpf_map_lookup_elem(&lru, &lru_key);
		if (!lru_timer)
			return 0;
		bpf_timer_set_callback(lru_timer, timer_cb1);
		if (bpf_timer_start(lru_timer, 0, 0) != 0)
			err |= 2;
	} else if (*key == LRU) {
		int lru_key, i;

		for (i = LRU + 1;
		     i <= 100 /* for the current LRU eviction algorithm this number
			       * should be larger than ~ lru->max_entries * 2
			       */;
		     i++) {
			struct elem init = {};

			/* lru_key cannot be used as the loop induction variable,
			 * otherwise the loop would be unbounded.
			 */
			lru_key = i;

			/* add more elements into the lru map to push out the
			 * current element and force deletion of this timer
			 */
			bpf_map_update_elem(map, &lru_key, &init, 0);
			/* look it up to bump it into the active list */
			bpf_map_lookup_elem(map, &lru_key);

			/* keep adding until *key changes underneath,
			 * which means that the key/timer memory was reused
			 */
			if (*key != LRU)
				break;
		}

		/* check that the timer was removed */
		if (bpf_timer_cancel(timer) != -EINVAL)
			err |= 4;
		ok |= 1;
	}
	return 0;
}

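/* Chained array -> lru timer test: test1 arms the array timer with a relative
 * CLOCK_MONOTONIC expiry of 0, timer_cb1(ARRAY) immediately arms the lru
 * timer with the same callback, and timer_cb1(LRU) then floods the lru map
 * until its own element (and thus its timer) is evicted.
 */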
SEC("fentry/bpf_fentry_test1")
int BPF_PROG2(test1, int, a)
{
	struct bpf_timer *arr_timer, *lru_timer;
	struct elem init = {};
	int lru_key = LRU;
	int array_key = ARRAY;

	arr_timer = bpf_map_lookup_elem(&array, &array_key);
	if (!arr_timer)
		return 0;
	bpf_timer_init(arr_timer, &array, CLOCK_MONOTONIC);

	bpf_map_update_elem(&lru, &lru_key, &init, 0);
	lru_timer = bpf_map_lookup_elem(&lru, &lru_key);
	if (!lru_timer)
		return 0;
	bpf_timer_init(lru_timer, &lru, CLOCK_MONOTONIC);

	bpf_timer_set_callback(arr_timer, timer_cb1);
	bpf_timer_start(arr_timer, 0 /* call timer_cb1 asap */, 0);

	/* init more timers to check that array destruction
	 * doesn't leak timer memory.
	 */
	array_key = 0;
	arr_timer = bpf_map_lookup_elem(&array, &array_key);
	if (!arr_timer)
		return 0;
	bpf_timer_init(arr_timer, &array, CLOCK_MONOTONIC);
	return 0;
}

/* callback for prealloc and non-prealloc hashtab timers */
static int timer_cb2(void *map, int *key, struct hmap_elem *val)
{
	if (*key == HTAB)
		callback_check--;
	else
		callback2_check--;
	if (val->counter > 0 && --val->counter) {
		/* re-arm the timer to fire again after 1 usec */
		bpf_timer_start(&val->timer, 1000, 0);
	} else if (*key == HTAB) {
		struct bpf_timer *arr_timer;
		int array_key = ARRAY;

		/* cancel arr_timer, otherwise the bpf_fentry_test1 prog
		 * will stay alive forever.
		 */
		arr_timer = bpf_map_lookup_elem(&array, &array_key);
		if (!arr_timer)
			return 0;
		if (bpf_timer_cancel(arr_timer) != 1)
			/* bpf_timer_cancel should return 1 to indicate
			 * that arr_timer was active at this time
			 */
			err |= 8;

		/* try to cancel ourselves. It shouldn't deadlock. */
		if (bpf_timer_cancel(&val->timer) != -EDEADLK)
			err |= 16;

		/* delete this key and this timer anyway.
		 * It shouldn't deadlock either.
		 */
		bpf_map_delete_elem(map, key);

		/* in a preallocated hashmap both 'key' and 'val' could have been
		 * reused to store another map element (like in the LRU case above),
		 * but in this controlled test environment the check below works.
		 * It's not a use-after-free. The memory is owned by the map.
		 */
		if (bpf_timer_start(&val->timer, 1000, 0) != -EINVAL)
			err |= 32;
		ok |= 2;
	} else {
		if (*key != HTAB_MALLOC)
			err |= 64;

		/* try to cancel ourselves. It shouldn't deadlock. */
		if (bpf_timer_cancel(&val->timer) != -EDEADLK)
			err |= 128;

		/* delete this key and this timer anyway.
		 * It shouldn't deadlock either.
		 */
		bpf_map_delete_elem(map, key);

		ok |= 4;
	}
	return 0;
}

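/* arm timer_cb2 on both the preallocated and the non-preallocated hash map
 * elements using CLOCK_BOOTTIME; called from test2 below
 */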
int bpf_timer_test(void)
{
	struct hmap_elem *val;
	int key = HTAB, key_malloc = HTAB_MALLOC;

	val = bpf_map_lookup_elem(&hmap, &key);
	if (val) {
		if (bpf_timer_init(&val->timer, &hmap, CLOCK_BOOTTIME) != 0)
			err |= 512;
		bpf_timer_set_callback(&val->timer, timer_cb2);
		bpf_timer_start(&val->timer, 1000, 0);
	}
	val = bpf_map_lookup_elem(&hmap_malloc, &key_malloc);
	if (val) {
		if (bpf_timer_init(&val->timer, &hmap_malloc, CLOCK_BOOTTIME) != 0)
			err |= 1024;
		bpf_timer_set_callback(&val->timer, timer_cb2);
		bpf_timer_start(&val->timer, 1000, 0);
	}
	return 0;
}

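/* Hashtab timer test: test2 seeds both hash maps, re-initializes and frees
 * timers via repeated update/delete to check for leaks, and finally arms the
 * periodic timer_cb2 timers through bpf_timer_test().
 */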
SEC("fentry/bpf_fentry_test2")
int BPF_PROG2(test2, int, a, int, b)
{
	struct hmap_elem init = {}, *val;
	int key = HTAB, key_malloc = HTAB_MALLOC;

	init.counter = 10; /* number of times to trigger timer_cb2 */
	bpf_map_update_elem(&hmap, &key, &init, 0);
	val = bpf_map_lookup_elem(&hmap, &key);
	if (val)
		bpf_timer_init(&val->timer, &hmap, CLOCK_BOOTTIME);
	/* update the same key to free the timer */
	bpf_map_update_elem(&hmap, &key, &init, 0);

	bpf_map_update_elem(&hmap_malloc, &key_malloc, &init, 0);
	val = bpf_map_lookup_elem(&hmap_malloc, &key_malloc);
	if (val)
		bpf_timer_init(&val->timer, &hmap_malloc, CLOCK_BOOTTIME);
	/* update the same key to free the timer */
	bpf_map_update_elem(&hmap_malloc, &key_malloc, &init, 0);

	/* init more timers to check that htab operations
	 * don't leak timer memory.
	 */
	key = 0;
	bpf_map_update_elem(&hmap, &key, &init, 0);
	val = bpf_map_lookup_elem(&hmap, &key);
	if (val)
		bpf_timer_init(&val->timer, &hmap, CLOCK_BOOTTIME);
	bpf_map_delete_elem(&hmap, &key);
	bpf_map_update_elem(&hmap, &key, &init, 0);
	val = bpf_map_lookup_elem(&hmap, &key);
	if (val)
		bpf_timer_init(&val->timer, &hmap, CLOCK_BOOTTIME);

	/* and with non-prealloc htab */
	key_malloc = 0;
	bpf_map_update_elem(&hmap_malloc, &key_malloc, &init, 0);
	val = bpf_map_lookup_elem(&hmap_malloc, &key_malloc);
	if (val)
		bpf_timer_init(&val->timer, &hmap_malloc, CLOCK_BOOTTIME);
	bpf_map_delete_elem(&hmap_malloc, &key_malloc);
	bpf_map_update_elem(&hmap_malloc, &key_malloc, &init, 0);
	val = bpf_map_lookup_elem(&hmap_malloc, &key_malloc);
	if (val)
		bpf_timer_init(&val->timer, &hmap_malloc, CLOCK_BOOTTIME);

	return bpf_timer_test();
}

/* callback for absolute timer */
static int timer_cb3(void *map, int *key, struct bpf_timer *timer)
{
	abs_data += 6;

	if (abs_data < 12) {
		bpf_timer_start(timer, bpf_ktime_get_boot_ns() + 1000,
				BPF_F_TIMER_ABS);
	} else {
		/* Re-arm the timer ~35 seconds in the future */
		bpf_timer_start(timer, bpf_ktime_get_boot_ns() + (1ull << 35),
				BPF_F_TIMER_ABS);
	}

	return 0;
}

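/* Absolute timer test: test3 arms a CLOCK_BOOTTIME timer with an absolute
 * expiry (BPF_F_TIMER_ABS); timer_cb3 keeps re-arming it with absolute
 * deadlines until abs_data reaches 12, then parks it far in the future.
 */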
SEC("fentry/bpf_fentry_test3")
int BPF_PROG2(test3, int, a)
{
	int key = 0;
	struct bpf_timer *timer;

	bpf_printk("test3");

	timer = bpf_map_lookup_elem(&abs_timer, &key);
	if (timer) {
		if (bpf_timer_init(timer, &abs_timer, CLOCK_BOOTTIME) != 0)
			err |= 2048;
		bpf_timer_set_callback(timer, timer_cb3);
		bpf_timer_start(timer, bpf_ktime_get_boot_ns() + 1000,
				BPF_F_TIMER_ABS);
	}

	return 0;
}

/* callback for pinned timer */
static int timer_cb_pinned(void *map, int *key, struct bpf_timer *timer)
{
	__s32 cpu = bpf_get_smp_processor_id();

	if (cpu != pinned_cpu)
		err |= 16384;

	pinned_callback_check++;
	return 0;
}

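/* arm a CPU-pinned timer (BPF_F_TIMER_CPU_PIN) and remember the arming CPU;
 * 'soft' selects a relative expiry, otherwise an absolute CLOCK_BOOTTIME
 * deadline is used, and timer_cb_pinned verifies it runs on the same CPU
 */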
static void test_pinned_timer(bool soft)
{
	int key = 0;
	void *map;
	struct bpf_timer *timer;
	__u64 flags = BPF_F_TIMER_CPU_PIN;
	__u64 start_time;

	if (soft) {
		map = &soft_timer_pinned;
		start_time = 0;
	} else {
		map = &abs_timer_pinned;
		start_time = bpf_ktime_get_boot_ns();
		flags |= BPF_F_TIMER_ABS;
	}

	timer = bpf_map_lookup_elem(map, &key);
	if (timer) {
		if (bpf_timer_init(timer, map, CLOCK_BOOTTIME) != 0)
			err |= 4096;
		bpf_timer_set_callback(timer, timer_cb_pinned);
		pinned_cpu = bpf_get_smp_processor_id();
		bpf_timer_start(timer, start_time + 1000, flags);
	} else {
		err |= 8192;
	}
}

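/* test4 exercises the soft (relative) pinned timer, test5 the absolute one */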
SEC("fentry/bpf_fentry_test4")
int BPF_PROG2(test4, int, a)
{
	bpf_printk("test4");
	test_pinned_timer(true);

	return 0;
}

SEC("fentry/bpf_fentry_test5")
int BPF_PROG2(test5, int, a)
{
	bpf_printk("test5");
	test_pinned_timer(false);

	return 0;
}

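/* Race test: the syscall program below is run repeatedly (presumably from
 * several user-space threads) so that bpf_timer_init/set_callback/start/cancel
 * race with a callback that keeps re-arming itself; -EBUSY from
 * bpf_timer_init is tolerated because an earlier run may already have
 * initialized the timer.
 */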
static int race_timer_callback(void *race_array, int *race_key, struct bpf_timer *timer)
{
	bpf_timer_start(timer, 1000000, 0);
	return 0;
}

SEC("syscall")
int race(void *ctx)
{
	struct bpf_timer *timer;
	int err, race_key = 0;
	struct elem init;

	__builtin_memset(&init, 0, sizeof(struct elem));
	bpf_map_update_elem(&race_array, &race_key, &init, BPF_ANY);

	timer = bpf_map_lookup_elem(&race_array, &race_key);
	if (!timer)
		return 1;

	err = bpf_timer_init(timer, &race_array, CLOCK_MONOTONIC);
	if (err && err != -EBUSY)
		return 1;

	bpf_timer_set_callback(timer, race_timer_callback);
	bpf_timer_start(timer, 0, 0);
	bpf_timer_cancel(timer);

	return 0;
}