// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_tracing.h>
#include <bpf/bpf_helpers.h>
#include "../bpf_testmod/bpf_testmod_kfunc.h"

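/* Value layout shared by every map below: one __kptr_untrusted field, which
 * may be loaded and stored directly but only yields an untrusted pointer,
 * and one referenced __kptr field, whose ownership may only be moved in and
 * out of the map with bpf_kptr_xchg().
 */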
struct map_value {
	struct prog_test_ref_kfunc __kptr_untrusted *unref_ptr;
	struct prog_test_ref_kfunc __kptr *ref_ptr;
};

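/* One instance of each map type that supports kptrs, all sharing the same
 * value type so the tests below run unchanged against each of them.
 */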
struct array_map {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__type(key, int);
	__type(value, struct map_value);
	__uint(max_entries, 1);
} array_map SEC(".maps");

struct pcpu_array_map {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__type(key, int);
	__type(value, struct map_value);
	__uint(max_entries, 1);
} pcpu_array_map SEC(".maps");

struct hash_map {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, int);
	__type(value, struct map_value);
	__uint(max_entries, 1);
} hash_map SEC(".maps");

struct pcpu_hash_map {
	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
	__type(key, int);
	__type(value, struct map_value);
	__uint(max_entries, 1);
} pcpu_hash_map SEC(".maps");

struct hash_malloc_map {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, int);
	__type(value, struct map_value);
	__uint(max_entries, 1);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} hash_malloc_map SEC(".maps");

struct pcpu_hash_malloc_map {
	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
	__type(key, int);
	__type(value, struct map_value);
	__uint(max_entries, 1);
	__uint(map_flags, BPF_F_NO_PREALLOC);
} pcpu_hash_malloc_map SEC(".maps");

struct lru_hash_map {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__type(key, int);
	__type(value, struct map_value);
	__uint(max_entries, 1);
} lru_hash_map SEC(".maps");

struct lru_pcpu_hash_map {
	__uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH);
	__type(key, int);
	__type(value, struct map_value);
	__uint(max_entries, 1);
} lru_pcpu_hash_map SEC(".maps");

struct cgrp_ls_map {
	__uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, struct map_value);
} cgrp_ls_map SEC(".maps");

struct task_ls_map {
	__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, struct map_value);
} task_ls_map SEC(".maps");

struct inode_ls_map {
	__uint(type, BPF_MAP_TYPE_INODE_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, struct map_value);
} inode_ls_map SEC(".maps");

struct sk_ls_map {
	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, struct map_value);
} sk_ls_map SEC(".maps");

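/* Declare an outer map for each (outer type, inner type) combination. The
 * outer map's value is an int-sized map-fd slot; slot 0 is statically
 * initialized to point at the inner map declared above.
 */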
#define DEFINE_MAP_OF_MAP(map_type, inner_map_type, name)	\
	struct {						\
		__uint(type, map_type);				\
		__uint(max_entries, 1);				\
		__uint(key_size, sizeof(int));			\
		__uint(value_size, sizeof(int));		\
		__array(values, struct inner_map_type);		\
	} name SEC(".maps") = {					\
		.values = { [0] = &inner_map_type },		\
	}

DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_map, array_of_array_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, hash_map, array_of_hash_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, hash_malloc_map, array_of_hash_malloc_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, lru_hash_map, array_of_lru_hash_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, pcpu_array_map, array_of_pcpu_array_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, pcpu_hash_map, array_of_pcpu_hash_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, array_map, hash_of_array_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, hash_map, hash_of_hash_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, hash_malloc_map, hash_of_hash_malloc_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, lru_hash_map, hash_of_lru_hash_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, pcpu_array_map, hash_of_pcpu_array_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, pcpu_hash_map, hash_of_pcpu_hash_maps);

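/* vmlinux.h does not carry kernel macros, so define a local WRITE_ONCE that
 * performs the kptr stores below as single volatile stores.
 */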
#define WRITE_ONCE(x, val) ((*(volatile typeof(x) *) &(x)) = (val))

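/* Exercise the untrusted kptr field: plain loads and stores are allowed, and
 * the verifier tracks the or-null state of the loaded pointer until the NULL
 * check below.
 */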
static void test_kptr_unref(struct map_value *v)
{
	struct prog_test_ref_kfunc *p;

	p = v->unref_ptr;
	/* store untrusted_ptr_or_null_ */
	WRITE_ONCE(v->unref_ptr, p);
	if (!p)
		return;
	if (p->a + p->b > 100)
		return;
	/* store untrusted_ptr_ */
	WRITE_ONCE(v->unref_ptr, p);
	/* store NULL */
	WRITE_ONCE(v->unref_ptr, NULL);
}

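/* Exercise the referenced kptr field: direct loads yield RCU-protected
 * pointers, while ownership moves in and out of the map only through
 * bpf_kptr_xchg().
 */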
static void test_kptr_ref(struct map_value *v)
{
	struct prog_test_ref_kfunc *p;

	p = v->ref_ptr;
	/* store ptr_or_null_ */
	WRITE_ONCE(v->unref_ptr, p);
	if (!p)
		return;
	/*
	 * p is rcu_ptr_prog_test_ref_kfunc,
	 * because bpf prog is non-sleepable and runs in RCU CS.
	 * p can be passed to kfunc that requires KF_RCU.
	 */
	bpf_kfunc_call_test_ref(p);
	if (p->a + p->b > 100)
		return;
	/* store NULL */
	p = bpf_kptr_xchg(&v->ref_ptr, NULL);
	if (!p)
		return;
	/*
	 * p is trusted_ptr_prog_test_ref_kfunc.
	 * p can be passed to kfunc that requires KF_RCU.
	 */
	bpf_kfunc_call_test_ref(p);
	if (p->a + p->b > 100) {
		bpf_kfunc_call_test_release(p);
		return;
	}
	/* store ptr_ */
	WRITE_ONCE(v->unref_ptr, p);
	bpf_kfunc_call_test_release(p);

	p = bpf_kfunc_call_test_acquire(&(unsigned long){0});
	if (!p)
		return;
	/* store ptr_ */
	p = bpf_kptr_xchg(&v->ref_ptr, p);
	if (!p)
		return;
	if (p->a + p->b > 100) {
		bpf_kfunc_call_test_release(p);
		return;
	}
	bpf_kfunc_call_test_release(p);
}

static void test_kptr(struct map_value *v)
{
	test_kptr_unref(v);
	test_kptr_ref(v);
}

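/* Run both kptr tests against each plain (non-local-storage) map type. */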
SEC("tc")
int test_map_kptr(struct __sk_buff *ctx)
{
	struct map_value *v;
	int key = 0;

#define TEST(map)					\
	v = bpf_map_lookup_elem(&map, &key);		\
	if (!v)						\
		return 0;				\
	test_kptr(v)

	TEST(array_map);
	TEST(hash_map);
	TEST(hash_malloc_map);
	TEST(lru_hash_map);
	TEST(pcpu_array_map);
	TEST(pcpu_hash_map);

#undef TEST
	return 0;
}

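/* Local storage maps are exercised from hooks that provide their owner
 * object: a cgroup, the current task, an inode, or a socket.
 */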
SEC("tp_btf/cgroup_mkdir")
int BPF_PROG(test_cgrp_map_kptr, struct cgroup *cgrp, const char *path)
{
	struct map_value *v;

	v = bpf_cgrp_storage_get(&cgrp_ls_map, cgrp, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (v)
		test_kptr(v);
	return 0;
}

SEC("lsm/inode_unlink")
int BPF_PROG(test_task_map_kptr, struct inode *inode, struct dentry *victim)
{
	struct task_struct *task;
	struct map_value *v;

	task = bpf_get_current_task_btf();
	if (!task)
		return 0;
	v = bpf_task_storage_get(&task_ls_map, task, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (v)
		test_kptr(v);
	return 0;
}

SEC("lsm/inode_unlink")
int BPF_PROG(test_inode_map_kptr, struct inode *inode, struct dentry *victim)
{
	struct map_value *v;

	v = bpf_inode_storage_get(&inode_ls_map, inode, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (v)
		test_kptr(v);
	return 0;
}

SEC("tc")
int test_sk_map_kptr(struct __sk_buff *ctx)
{
	struct map_value *v;
	struct bpf_sock *sk;

	sk = ctx->sk;
	if (!sk)
		return 0;
	v = bpf_sk_storage_get(&sk_ls_map, sk, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (v)
		test_kptr(v);
	return 0;
}

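/* Look up the inner map through each outer map, then run the kptr tests on
 * the inner map's value.
 */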
SEC("tc")
int test_map_in_map_kptr(struct __sk_buff *ctx)
{
	struct map_value *v;
	int key = 0;
	void *map;

#define TEST(map_in_map)				\
	map = bpf_map_lookup_elem(&map_in_map, &key);	\
	if (!map)					\
		return 0;				\
	v = bpf_map_lookup_elem(map, &key);		\
	if (!v)						\
		return 0;				\
	test_kptr(v)

	TEST(array_of_array_maps);
	TEST(array_of_hash_maps);
	TEST(array_of_hash_malloc_maps);
	TEST(array_of_lru_hash_maps);
	TEST(array_of_pcpu_array_maps);
	TEST(array_of_pcpu_hash_maps);
	TEST(hash_of_array_maps);
	TEST(hash_of_hash_maps);
	TEST(hash_of_hash_malloc_maps);
	TEST(hash_of_lru_hash_maps);
	TEST(hash_of_pcpu_array_maps);
	TEST(hash_of_pcpu_hash_maps);

#undef TEST
	return 0;
}

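/* Mirror of the expected refcount of the shared prog_test_ref_kfunc object,
 * which starts out with a single reference. Kept in sync across every
 * acquire/release below so the checks can compare against the kernel's
 * counter.
 */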
int ref = 1;

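/* Acquire a reference, move it into the map with bpf_kptr_xchg(), and check
 * the object's refcount after each ownership transfer. Deliberately leaves
 * one reference in the map for test_map_kptr_ref_post() to find.
 */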
static __always_inline
int test_map_kptr_ref_pre(struct map_value *v)
{
	struct prog_test_ref_kfunc *p, *p_st;
	unsigned long arg = 0;
	int ret;

	p = bpf_kfunc_call_test_acquire(&arg);
	if (!p)
		return 1;
	ref++;

	p_st = p->next;
	if (p_st->cnt.refs.counter != ref) {
		ret = 2;
		goto end;
	}

	p = bpf_kptr_xchg(&v->ref_ptr, p);
	if (p) {
		ret = 3;
		goto end;
	}
	if (p_st->cnt.refs.counter != ref)
		return 4;

	p = bpf_kptr_xchg(&v->ref_ptr, NULL);
	if (!p)
		return 5;
	bpf_kfunc_call_test_release(p);
	ref--;
	if (p_st->cnt.refs.counter != ref)
		return 6;

	p = bpf_kfunc_call_test_acquire(&arg);
	if (!p)
		return 7;
	ref++;
	p = bpf_kptr_xchg(&v->ref_ptr, p);
	if (p) {
		ret = 8;
		goto end;
	}
	if (p_st->cnt.refs.counter != ref)
		return 9;
	/* Leave in map */

	return 0;
end:
	ref--;
	bpf_kfunc_call_test_release(p);
	return ret;
}

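/* Verify the reference left in the map by test_map_kptr_ref_pre(): take it
 * out and put it back, checking the refcount at each step.
 */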
static __always_inline
int test_map_kptr_ref_post(struct map_value *v)
{
	struct prog_test_ref_kfunc *p, *p_st;

	p_st = v->ref_ptr;
	if (!p_st || p_st->cnt.refs.counter != ref)
		return 1;

	p = bpf_kptr_xchg(&v->ref_ptr, NULL);
	if (!p)
		return 2;
	if (p_st->cnt.refs.counter != ref) {
		bpf_kfunc_call_test_release(p);
		return 3;
	}

	p = bpf_kptr_xchg(&v->ref_ptr, p);
	if (p) {
		bpf_kfunc_call_test_release(p);
		return 4;
	}
	if (p_st->cnt.refs.counter != ref)
		return 5;

	return 0;
}

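/* Helpers to run the pre-check against one element of each map. Percpu maps
 * are looked up with bpf_map_lookup_percpu_elem() on CPU 0 so the post-check
 * in the second program sees the same per-CPU element.
 */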
#define TEST(map)					\
	v = bpf_map_lookup_elem(&map, &key);		\
	if (!v)						\
		return -1;				\
	ret = test_map_kptr_ref_pre(v);			\
	if (ret)					\
		return ret;

#define TEST_PCPU(map)					\
	v = bpf_map_lookup_percpu_elem(&map, &key, 0);	\
	if (!v)						\
		return -1;				\
	ret = test_map_kptr_ref_pre(v);			\
	if (ret)					\
		return ret;

SEC("tc")
int test_map_kptr_ref1(struct __sk_buff *ctx)
{
	struct map_value *v, val = {};
	int key = 0, ret;

	bpf_map_update_elem(&hash_map, &key, &val, 0);
	bpf_map_update_elem(&hash_malloc_map, &key, &val, 0);
	bpf_map_update_elem(&lru_hash_map, &key, &val, 0);

	bpf_map_update_elem(&pcpu_hash_map, &key, &val, 0);
	bpf_map_update_elem(&pcpu_hash_malloc_map, &key, &val, 0);
	bpf_map_update_elem(&lru_pcpu_hash_map, &key, &val, 0);

	TEST(array_map);
	TEST(hash_map);
	TEST(hash_malloc_map);
	TEST(lru_hash_map);

	TEST_PCPU(pcpu_array_map);
	TEST_PCPU(pcpu_hash_map);
	TEST_PCPU(pcpu_hash_malloc_map);
	TEST_PCPU(lru_pcpu_hash_map);

	return 0;
}

#undef TEST
#undef TEST_PCPU

#define TEST(map)					\
	v = bpf_map_lookup_elem(&map, &key);		\
	if (!v)						\
		return -1;				\
	ret = test_map_kptr_ref_post(v);		\
	if (ret)					\
		return ret;

#define TEST_PCPU(map)					\
	v = bpf_map_lookup_percpu_elem(&map, &key, 0);	\
	if (!v)						\
		return -1;				\
	ret = test_map_kptr_ref_post(v);		\
	if (ret)					\
		return ret;

SEC("tc")
int test_map_kptr_ref2(struct __sk_buff *ctx)
{
	struct map_value *v;
	int key = 0, ret;

	TEST(array_map);
	TEST(hash_map);
	TEST(hash_malloc_map);
	TEST(lru_hash_map);

	TEST_PCPU(pcpu_array_map);
	TEST_PCPU(pcpu_hash_map);
	TEST_PCPU(pcpu_hash_malloc_map);
	TEST_PCPU(lru_pcpu_hash_map);

	return 0;
}

#undef TEST
#undef TEST_PCPU

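/* Acquire and immediately release a reference, checking that the refcount
 * observed on the object matches the mirrored count at each step.
 */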
SEC("tc")
int test_map_kptr_ref3(struct __sk_buff *ctx)
{
	struct prog_test_ref_kfunc *p;
	unsigned long sp = 0;

	p = bpf_kfunc_call_test_acquire(&sp);
	if (!p)
		return 1;
	ref++;
	if (p->cnt.refs.counter != ref) {
		bpf_kfunc_call_test_release(p);
		return 2;
	}
	bpf_kfunc_call_test_release(p);
	ref--;
	return 0;
}

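/* The same pre/post/delete sequence for task local storage, driven from
 * syscall programs: ref1 creates the storage and leaves a reference in it,
 * ref2 verifies that reference, and ref_del deletes the storage while it
 * still holds the kptr, so freeing the element must release the reference.
 */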
SEC("syscall")
int test_ls_map_kptr_ref1(void *ctx)
{
	struct task_struct *current;
	struct map_value *v;

	current = bpf_get_current_task_btf();
	if (!current)
		return 100;
	v = bpf_task_storage_get(&task_ls_map, current, NULL, 0);
	if (v)
		return 150;
	v = bpf_task_storage_get(&task_ls_map, current, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (!v)
		return 200;
	return test_map_kptr_ref_pre(v);
}

SEC("syscall")
int test_ls_map_kptr_ref2(void *ctx)
{
	struct task_struct *current;
	struct map_value *v;

	current = bpf_get_current_task_btf();
	if (!current)
		return 100;
	v = bpf_task_storage_get(&task_ls_map, current, NULL, 0);
	if (!v)
		return 200;
	return test_map_kptr_ref_post(v);
}

SEC("syscall")
int test_ls_map_kptr_ref_del(void *ctx)
{
	struct task_struct *current;
	struct map_value *v;

	current = bpf_get_current_task_btf();
	if (!current)
		return 100;
	v = bpf_task_storage_get(&task_ls_map, current, NULL, 0);
	if (!v)
		return 200;
	if (!v->ref_ptr)
		return 300;
	return bpf_task_storage_delete(&task_ls_map, current);
}

char _license[] SEC("license") = "GPL";