/*
 * Captured from a web view of the Linux source tree:
 * tools/testing/selftests/bpf/progs/map_kptr.c
 * Two kernel versions follow: v6.13.7 first, then the older v6.2 variant.
 */
  1// SPDX-License-Identifier: GPL-2.0
  2#include <vmlinux.h>
  3#include <bpf/bpf_tracing.h>
  4#include <bpf/bpf_helpers.h>
  5#include "../bpf_testmod/bpf_testmod_kfunc.h"
  6
  7struct map_value {
  8	struct prog_test_ref_kfunc __kptr_untrusted *unref_ptr;
  9	struct prog_test_ref_kfunc __kptr *ref_ptr;
 10};
 11
 12struct array_map {
 13	__uint(type, BPF_MAP_TYPE_ARRAY);
 14	__type(key, int);
 15	__type(value, struct map_value);
 16	__uint(max_entries, 1);
 17} array_map SEC(".maps");
 18
 19struct pcpu_array_map {
 20	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
 21	__type(key, int);
 22	__type(value, struct map_value);
 23	__uint(max_entries, 1);
 24} pcpu_array_map SEC(".maps");
 25
 26struct hash_map {
 27	__uint(type, BPF_MAP_TYPE_HASH);
 28	__type(key, int);
 29	__type(value, struct map_value);
 30	__uint(max_entries, 1);
 31} hash_map SEC(".maps");
 32
 33struct pcpu_hash_map {
 34	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
 35	__type(key, int);
 36	__type(value, struct map_value);
 37	__uint(max_entries, 1);
 38} pcpu_hash_map SEC(".maps");
 39
 40struct hash_malloc_map {
 41	__uint(type, BPF_MAP_TYPE_HASH);
 42	__type(key, int);
 43	__type(value, struct map_value);
 44	__uint(max_entries, 1);
 45	__uint(map_flags, BPF_F_NO_PREALLOC);
 46} hash_malloc_map SEC(".maps");
 47
 48struct pcpu_hash_malloc_map {
 49	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
 50	__type(key, int);
 51	__type(value, struct map_value);
 52	__uint(max_entries, 1);
 53	__uint(map_flags, BPF_F_NO_PREALLOC);
 54} pcpu_hash_malloc_map SEC(".maps");
 55
 56struct lru_hash_map {
 57	__uint(type, BPF_MAP_TYPE_LRU_HASH);
 58	__type(key, int);
 59	__type(value, struct map_value);
 60	__uint(max_entries, 1);
 61} lru_hash_map SEC(".maps");
 62
 63struct lru_pcpu_hash_map {
 64	__uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH);
 65	__type(key, int);
 66	__type(value, struct map_value);
 67	__uint(max_entries, 1);
 68} lru_pcpu_hash_map SEC(".maps");
 69
 70struct cgrp_ls_map {
 71	__uint(type, BPF_MAP_TYPE_CGRP_STORAGE);
 72	__uint(map_flags, BPF_F_NO_PREALLOC);
 73	__type(key, int);
 74	__type(value, struct map_value);
 75} cgrp_ls_map SEC(".maps");
 76
 77struct task_ls_map {
 78	__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
 79	__uint(map_flags, BPF_F_NO_PREALLOC);
 80	__type(key, int);
 81	__type(value, struct map_value);
 82} task_ls_map SEC(".maps");
 83
 84struct inode_ls_map {
 85	__uint(type, BPF_MAP_TYPE_INODE_STORAGE);
 86	__uint(map_flags, BPF_F_NO_PREALLOC);
 87	__type(key, int);
 88	__type(value, struct map_value);
 89} inode_ls_map SEC(".maps");
 90
 91struct sk_ls_map {
 92	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
 93	__uint(map_flags, BPF_F_NO_PREALLOC);
 94	__type(key, int);
 95	__type(value, struct map_value);
 96} sk_ls_map SEC(".maps");
 97
 98#define DEFINE_MAP_OF_MAP(map_type, inner_map_type, name)       \
 99	struct {                                                \
100		__uint(type, map_type);                         \
101		__uint(max_entries, 1);                         \
102		__uint(key_size, sizeof(int));                  \
103		__uint(value_size, sizeof(int));                \
104		__array(values, struct inner_map_type);         \
105	} name SEC(".maps") = {                                 \
106		.values = { [0] = &inner_map_type },            \
107	}
108
/* Array-of-maps and hash-of-maps outer maps over each inner map flavor. */
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_map, array_of_array_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, hash_map, array_of_hash_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, hash_malloc_map, array_of_hash_malloc_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, lru_hash_map, array_of_lru_hash_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, pcpu_array_map, array_of_pcpu_array_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, pcpu_hash_map, array_of_pcpu_hash_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, array_map, hash_of_array_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, hash_map, hash_of_hash_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, hash_malloc_map, hash_of_hash_malloc_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, lru_hash_map, hash_of_lru_hash_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, pcpu_array_map, hash_of_pcpu_array_maps);
DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, pcpu_hash_map, hash_of_pcpu_hash_maps);
/*
 * Store @val to @x through a volatile-qualified lvalue so the compiler emits
 * exactly one store (no tearing, caching, or elision).
 * __typeof__ instead of typeof: valid under strict ISO modes (-std=c11) as
 * well as GNU modes; behavior is identical.
 */
#define WRITE_ONCE(x, val) ((*(volatile __typeof__(x) *) &(x)) = (val))
124static void test_kptr_unref(struct map_value *v)
125{
126	struct prog_test_ref_kfunc *p;
127
128	p = v->unref_ptr;
129	/* store untrusted_ptr_or_null_ */
130	WRITE_ONCE(v->unref_ptr, p);
131	if (!p)
132		return;
133	if (p->a + p->b > 100)
134		return;
135	/* store untrusted_ptr_ */
136	WRITE_ONCE(v->unref_ptr, p);
137	/* store NULL */
138	WRITE_ONCE(v->unref_ptr, NULL);
139}
140
141static void test_kptr_ref(struct map_value *v)
142{
143	struct prog_test_ref_kfunc *p;
144
145	p = v->ref_ptr;
146	/* store ptr_or_null_ */
147	WRITE_ONCE(v->unref_ptr, p);
148	if (!p)
149		return;
150	/*
151	 * p is rcu_ptr_prog_test_ref_kfunc,
152	 * because bpf prog is non-sleepable and runs in RCU CS.
153	 * p can be passed to kfunc that requires KF_RCU.
154	 */
155	bpf_kfunc_call_test_ref(p);
156	if (p->a + p->b > 100)
157		return;
158	/* store NULL */
159	p = bpf_kptr_xchg(&v->ref_ptr, NULL);
160	if (!p)
161		return;
162	/*
163	 * p is trusted_ptr_prog_test_ref_kfunc.
164	 * p can be passed to kfunc that requires KF_RCU.
165	 */
166	bpf_kfunc_call_test_ref(p);
167	if (p->a + p->b > 100) {
168		bpf_kfunc_call_test_release(p);
169		return;
170	}
171	/* store ptr_ */
172	WRITE_ONCE(v->unref_ptr, p);
173	bpf_kfunc_call_test_release(p);
174
175	p = bpf_kfunc_call_test_acquire(&(unsigned long){0});
176	if (!p)
177		return;
178	/* store ptr_ */
179	p = bpf_kptr_xchg(&v->ref_ptr, p);
180	if (!p)
181		return;
182	if (p->a + p->b > 100) {
183		bpf_kfunc_call_test_release(p);
184		return;
185	}
186	bpf_kfunc_call_test_release(p);
187}
188
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/* Run both kptr sub-tests against one map value. */
static void test_kptr(struct map_value *v)
{
	test_kptr_unref(v);
	test_kptr_ref(v);
}
195SEC("tc")
196int test_map_kptr(struct __sk_buff *ctx)
197{
198	struct map_value *v;
199	int key = 0;
200
201#define TEST(map)					\
202	v = bpf_map_lookup_elem(&map, &key);		\
203	if (!v)						\
204		return 0;				\
205	test_kptr(v)
206
207	TEST(array_map);
208	TEST(hash_map);
209	TEST(hash_malloc_map);
210	TEST(lru_hash_map);
211	TEST(pcpu_array_map);
212	TEST(pcpu_hash_map);
213
214#undef TEST
215	return 0;
216}
217
218SEC("tp_btf/cgroup_mkdir")
219int BPF_PROG(test_cgrp_map_kptr, struct cgroup *cgrp, const char *path)
220{
221	struct map_value *v;
222
223	v = bpf_cgrp_storage_get(&cgrp_ls_map, cgrp, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE);
224	if (v)
225		test_kptr(v);
226	return 0;
227}
228
229SEC("lsm/inode_unlink")
230int BPF_PROG(test_task_map_kptr, struct inode *inode, struct dentry *victim)
231{
232	struct task_struct *task;
233	struct map_value *v;
234
235	task = bpf_get_current_task_btf();
236	if (!task)
237		return 0;
238	v = bpf_task_storage_get(&task_ls_map, task, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE);
239	if (v)
240		test_kptr(v);
241	return 0;
242}
243
244SEC("lsm/inode_unlink")
245int BPF_PROG(test_inode_map_kptr, struct inode *inode, struct dentry *victim)
246{
247	struct map_value *v;
248
249	v = bpf_inode_storage_get(&inode_ls_map, inode, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE);
250	if (v)
251		test_kptr(v);
252	return 0;
253}
254
255SEC("tc")
256int test_sk_map_kptr(struct __sk_buff *ctx)
257{
258	struct map_value *v;
259	struct bpf_sock *sk;
260
261	sk = ctx->sk;
262	if (!sk)
263		return 0;
264	v = bpf_sk_storage_get(&sk_ls_map, sk, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE);
265	if (v)
266		test_kptr(v);
267	return 0;
268}
269
270SEC("tc")
271int test_map_in_map_kptr(struct __sk_buff *ctx)
272{
273	struct map_value *v;
274	int key = 0;
275	void *map;
276
277#define TEST(map_in_map)                                \
278	map = bpf_map_lookup_elem(&map_in_map, &key);   \
279	if (!map)                                       \
280		return 0;                               \
281	v = bpf_map_lookup_elem(map, &key);		\
282	if (!v)						\
283		return 0;				\
284	test_kptr(v)
285
286	TEST(array_of_array_maps);
287	TEST(array_of_hash_maps);
288	TEST(array_of_hash_malloc_maps);
289	TEST(array_of_lru_hash_maps);
290	TEST(array_of_pcpu_array_maps);
291	TEST(array_of_pcpu_hash_maps);
292	TEST(hash_of_array_maps);
293	TEST(hash_of_hash_maps);
294	TEST(hash_of_hash_malloc_maps);
295	TEST(hash_of_lru_hash_maps);
296	TEST(hash_of_pcpu_array_maps);
297	TEST(hash_of_pcpu_hash_maps);
298
299#undef TEST
300	return 0;
301}
302
/* Expected refcount of the shared test object, tracked across programs. */
int ref = 1;
305static __always_inline
306int test_map_kptr_ref_pre(struct map_value *v)
307{
308	struct prog_test_ref_kfunc *p, *p_st;
309	unsigned long arg = 0;
310	int ret;
 
311
312	p = bpf_kfunc_call_test_acquire(&arg);
313	if (!p)
314		return 1;
315	ref++;
316
317	p_st = p->next;
318	if (p_st->cnt.refs.counter != ref) {
319		ret = 2;
320		goto end;
321	}
322
 
 
 
 
 
 
323	p = bpf_kptr_xchg(&v->ref_ptr, p);
324	if (p) {
325		ret = 3;
326		goto end;
327	}
328	if (p_st->cnt.refs.counter != ref)
329		return 4;
 
 
 
 
 
 
 
 
 
 
 
330
331	p = bpf_kptr_xchg(&v->ref_ptr, NULL);
332	if (!p)
333		return 5;
334	bpf_kfunc_call_test_release(p);
335	ref--;
336	if (p_st->cnt.refs.counter != ref)
337		return 6;
338
339	p = bpf_kfunc_call_test_acquire(&arg);
340	if (!p)
341		return 7;
342	ref++;
343	p = bpf_kptr_xchg(&v->ref_ptr, p);
344	if (p) {
345		ret = 8;
346		goto end;
347	}
348	if (p_st->cnt.refs.counter != ref)
349		return 9;
350	/* Leave in map */
351
352	return 0;
353end:
354	ref--;
355	bpf_kfunc_call_test_release(p);
356	return ret;
357}
358
359static __always_inline
360int test_map_kptr_ref_post(struct map_value *v)
361{
362	struct prog_test_ref_kfunc *p, *p_st;
 
 
363
364	p_st = v->ref_ptr;
365	if (!p_st || p_st->cnt.refs.counter != ref)
366		return 1;
367
 
 
 
 
368	p = bpf_kptr_xchg(&v->ref_ptr, NULL);
369	if (!p)
370		return 2;
371	if (p_st->cnt.refs.counter != ref) {
372		bpf_kfunc_call_test_release(p);
373		return 3;
 
 
 
374	}
375
376	p = bpf_kptr_xchg(&v->ref_ptr, p);
377	if (p) {
378		bpf_kfunc_call_test_release(p);
379		return 4;
380	}
381	if (p_st->cnt.refs.counter != ref)
382		return 5;
383
384	return 0;
385}
386
/* Look up @map's single element and run the _pre stage; bail on error. */
#define TEST(map)                            \
	v = bpf_map_lookup_elem(&map, &key); \
	if (!v)                              \
		return -1;                   \
	ret = test_map_kptr_ref_pre(v);      \
	if (ret)                             \
		return ret;

/* Same, but via a per-CPU lookup on CPU 0. */
#define TEST_PCPU(map)                                 \
	v = bpf_map_lookup_percpu_elem(&map, &key, 0); \
	if (!v)                                        \
		return -1;                             \
	ret = test_map_kptr_ref_pre(v);                \
	if (ret)                                       \
		return ret;
403SEC("tc")
404int test_map_kptr_ref1(struct __sk_buff *ctx)
405{
406	struct map_value *v, val = {};
407	int key = 0, ret;
408
409	bpf_map_update_elem(&hash_map, &key, &val, 0);
410	bpf_map_update_elem(&hash_malloc_map, &key, &val, 0);
411	bpf_map_update_elem(&lru_hash_map, &key, &val, 0);
412
413	bpf_map_update_elem(&pcpu_hash_map, &key, &val, 0);
414	bpf_map_update_elem(&pcpu_hash_malloc_map, &key, &val, 0);
415	bpf_map_update_elem(&lru_pcpu_hash_map, &key, &val, 0);
416
417	TEST(array_map);
418	TEST(hash_map);
419	TEST(hash_malloc_map);
420	TEST(lru_hash_map);
421
422	TEST_PCPU(pcpu_array_map);
423	TEST_PCPU(pcpu_hash_map);
424	TEST_PCPU(pcpu_hash_malloc_map);
425	TEST_PCPU(lru_pcpu_hash_map);
426
427	return 0;
428}
429
#undef TEST
#undef TEST_PCPU

/* Same lookup helpers as above, but running the _post stage. */
#define TEST(map)                            \
	v = bpf_map_lookup_elem(&map, &key); \
	if (!v)                              \
		return -1;                   \
	ret = test_map_kptr_ref_post(v);     \
	if (ret)                             \
		return ret;

#define TEST_PCPU(map)                                 \
	v = bpf_map_lookup_percpu_elem(&map, &key, 0); \
	if (!v)                                        \
		return -1;                             \
	ret = test_map_kptr_ref_post(v);               \
	if (ret)                                       \
		return ret;
449SEC("tc")
450int test_map_kptr_ref2(struct __sk_buff *ctx)
451{
452	struct map_value *v;
453	int key = 0, ret;
454
455	TEST(array_map);
456	TEST(hash_map);
457	TEST(hash_malloc_map);
458	TEST(lru_hash_map);
459
460	TEST_PCPU(pcpu_array_map);
461	TEST_PCPU(pcpu_hash_map);
462	TEST_PCPU(pcpu_hash_malloc_map);
463	TEST_PCPU(lru_pcpu_hash_map);
464
465	return 0;
466}
467
468#undef TEST
469#undef TEST_PCPU
470
471SEC("tc")
472int test_map_kptr_ref3(struct __sk_buff *ctx)
473{
474	struct prog_test_ref_kfunc *p;
475	unsigned long sp = 0;
476
477	p = bpf_kfunc_call_test_acquire(&sp);
478	if (!p)
479		return 1;
480	ref++;
481	if (p->cnt.refs.counter != ref) {
482		bpf_kfunc_call_test_release(p);
483		return 2;
484	}
485	bpf_kfunc_call_test_release(p);
486	ref--;
487	return 0;
488}
489
490SEC("syscall")
491int test_ls_map_kptr_ref1(void *ctx)
492{
493	struct task_struct *current;
494	struct map_value *v;
495
496	current = bpf_get_current_task_btf();
497	if (!current)
498		return 100;
499	v = bpf_task_storage_get(&task_ls_map, current, NULL, 0);
500	if (v)
501		return 150;
502	v = bpf_task_storage_get(&task_ls_map, current, NULL, BPF_LOCAL_STORAGE_GET_F_CREATE);
503	if (!v)
504		return 200;
505	return test_map_kptr_ref_pre(v);
506}
507
508SEC("syscall")
509int test_ls_map_kptr_ref2(void *ctx)
510{
511	struct task_struct *current;
512	struct map_value *v;
513
514	current = bpf_get_current_task_btf();
515	if (!current)
516		return 100;
517	v = bpf_task_storage_get(&task_ls_map, current, NULL, 0);
518	if (!v)
519		return 200;
520	return test_map_kptr_ref_post(v);
521}
522
523SEC("syscall")
524int test_ls_map_kptr_ref_del(void *ctx)
525{
526	struct task_struct *current;
527	struct map_value *v;
528
529	current = bpf_get_current_task_btf();
530	if (!current)
531		return 100;
532	v = bpf_task_storage_get(&task_ls_map, current, NULL, 0);
533	if (!v)
534		return 200;
535	if (!v->ref_ptr)
536		return 300;
537	return bpf_task_storage_delete(&task_ls_map, current);
538}
539
540char _license[] SEC("license") = "GPL";
/* ==== Older v6.2 version of the same selftest, captured below ==== */
  1// SPDX-License-Identifier: GPL-2.0
  2#include <vmlinux.h>
  3#include <bpf/bpf_tracing.h>
  4#include <bpf/bpf_helpers.h>
 
  5
  6struct map_value {
  7	struct prog_test_ref_kfunc __kptr *unref_ptr;
  8	struct prog_test_ref_kfunc __kptr_ref *ref_ptr;
  9};
 10
 11struct array_map {
 12	__uint(type, BPF_MAP_TYPE_ARRAY);
 13	__type(key, int);
 14	__type(value, struct map_value);
 15	__uint(max_entries, 1);
 16} array_map SEC(".maps");
 17
 
 
 
 
 
 
 
 18struct hash_map {
 19	__uint(type, BPF_MAP_TYPE_HASH);
 20	__type(key, int);
 21	__type(value, struct map_value);
 22	__uint(max_entries, 1);
 23} hash_map SEC(".maps");
 24
 
 
 
 
 
 
 
 25struct hash_malloc_map {
 26	__uint(type, BPF_MAP_TYPE_HASH);
 27	__type(key, int);
 28	__type(value, struct map_value);
 29	__uint(max_entries, 1);
 30	__uint(map_flags, BPF_F_NO_PREALLOC);
 31} hash_malloc_map SEC(".maps");
 32
 
 
 
 
 
 
 
 
 33struct lru_hash_map {
 34	__uint(type, BPF_MAP_TYPE_LRU_HASH);
 35	__type(key, int);
 36	__type(value, struct map_value);
 37	__uint(max_entries, 1);
 38} lru_hash_map SEC(".maps");
 39
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 40#define DEFINE_MAP_OF_MAP(map_type, inner_map_type, name)       \
 41	struct {                                                \
 42		__uint(type, map_type);                         \
 43		__uint(max_entries, 1);                         \
 44		__uint(key_size, sizeof(int));                  \
 45		__uint(value_size, sizeof(int));                \
 46		__array(values, struct inner_map_type);         \
 47	} name SEC(".maps") = {                                 \
 48		.values = { [0] = &inner_map_type },            \
 49	}
 50
 51DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_map, array_of_array_maps);
 52DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, hash_map, array_of_hash_maps);
 53DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, hash_malloc_map, array_of_hash_malloc_maps);
 54DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_ARRAY_OF_MAPS, lru_hash_map, array_of_lru_hash_maps);
 
 
 55DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, array_map, hash_of_array_maps);
 56DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, hash_map, hash_of_hash_maps);
 57DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, hash_malloc_map, hash_of_hash_malloc_maps);
 58DEFINE_MAP_OF_MAP(BPF_MAP_TYPE_HASH_OF_MAPS, lru_hash_map, hash_of_lru_hash_maps);
 
 
 59
 60extern struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire(unsigned long *sp) __ksym;
 61extern struct prog_test_ref_kfunc *
 62bpf_kfunc_call_test_kptr_get(struct prog_test_ref_kfunc **p, int a, int b) __ksym;
 63extern void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p) __ksym;
 64
 65static void test_kptr_unref(struct map_value *v)
 66{
 67	struct prog_test_ref_kfunc *p;
 68
 69	p = v->unref_ptr;
 70	/* store untrusted_ptr_or_null_ */
 71	v->unref_ptr = p;
 72	if (!p)
 73		return;
 74	if (p->a + p->b > 100)
 75		return;
 76	/* store untrusted_ptr_ */
 77	v->unref_ptr = p;
 78	/* store NULL */
 79	v->unref_ptr = NULL;
 80}
 81
 82static void test_kptr_ref(struct map_value *v)
 83{
 84	struct prog_test_ref_kfunc *p;
 85
 86	p = v->ref_ptr;
 87	/* store ptr_or_null_ */
 88	v->unref_ptr = p;
 89	if (!p)
 90		return;
 
 
 
 
 
 
 91	if (p->a + p->b > 100)
 92		return;
 93	/* store NULL */
 94	p = bpf_kptr_xchg(&v->ref_ptr, NULL);
 95	if (!p)
 96		return;
 
 
 
 
 
 97	if (p->a + p->b > 100) {
 98		bpf_kfunc_call_test_release(p);
 99		return;
100	}
101	/* store ptr_ */
102	v->unref_ptr = p;
103	bpf_kfunc_call_test_release(p);
104
105	p = bpf_kfunc_call_test_acquire(&(unsigned long){0});
106	if (!p)
107		return;
108	/* store ptr_ */
109	p = bpf_kptr_xchg(&v->ref_ptr, p);
110	if (!p)
111		return;
112	if (p->a + p->b > 100) {
113		bpf_kfunc_call_test_release(p);
114		return;
115	}
116	bpf_kfunc_call_test_release(p);
117}
118
119static void test_kptr_get(struct map_value *v)
120{
121	struct prog_test_ref_kfunc *p;
122
123	p = bpf_kfunc_call_test_kptr_get(&v->ref_ptr, 0, 0);
124	if (!p)
125		return;
126	if (p->a + p->b > 100) {
127		bpf_kfunc_call_test_release(p);
128		return;
129	}
130	bpf_kfunc_call_test_release(p);
131}
132
/* Run all three kptr sub-tests against one map value. */
static void test_kptr(struct map_value *v)
{
	test_kptr_unref(v);
	test_kptr_ref(v);
	test_kptr_get(v);
}
140SEC("tc")
141int test_map_kptr(struct __sk_buff *ctx)
142{
143	struct map_value *v;
144	int key = 0;
145
146#define TEST(map)					\
147	v = bpf_map_lookup_elem(&map, &key);		\
148	if (!v)						\
149		return 0;				\
150	test_kptr(v)
151
152	TEST(array_map);
153	TEST(hash_map);
154	TEST(hash_malloc_map);
155	TEST(lru_hash_map);
 
 
156
157#undef TEST
158	return 0;
159}
160
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
161SEC("tc")
162int test_map_in_map_kptr(struct __sk_buff *ctx)
163{
164	struct map_value *v;
165	int key = 0;
166	void *map;
167
168#define TEST(map_in_map)                                \
169	map = bpf_map_lookup_elem(&map_in_map, &key);   \
170	if (!map)                                       \
171		return 0;                               \
172	v = bpf_map_lookup_elem(map, &key);		\
173	if (!v)						\
174		return 0;				\
175	test_kptr(v)
176
177	TEST(array_of_array_maps);
178	TEST(array_of_hash_maps);
179	TEST(array_of_hash_malloc_maps);
180	TEST(array_of_lru_hash_maps);
 
 
181	TEST(hash_of_array_maps);
182	TEST(hash_of_hash_maps);
183	TEST(hash_of_hash_malloc_maps);
184	TEST(hash_of_lru_hash_maps);
 
 
185
186#undef TEST
187	return 0;
188}
189
190SEC("tc")
191int test_map_kptr_ref(struct __sk_buff *ctx)
 
 
192{
193	struct prog_test_ref_kfunc *p, *p_st;
194	unsigned long arg = 0;
195	struct map_value *v;
196	int key = 0, ret;
197
198	p = bpf_kfunc_call_test_acquire(&arg);
199	if (!p)
200		return 1;
 
201
202	p_st = p->next;
203	if (p_st->cnt.refs.counter != 2) {
204		ret = 2;
205		goto end;
206	}
207
208	v = bpf_map_lookup_elem(&array_map, &key);
209	if (!v) {
210		ret = 3;
211		goto end;
212	}
213
214	p = bpf_kptr_xchg(&v->ref_ptr, p);
215	if (p) {
216		ret = 4;
217		goto end;
218	}
219	if (p_st->cnt.refs.counter != 2)
220		return 5;
221
222	p = bpf_kfunc_call_test_kptr_get(&v->ref_ptr, 0, 0);
223	if (!p)
224		return 6;
225	if (p_st->cnt.refs.counter != 3) {
226		ret = 7;
227		goto end;
228	}
229	bpf_kfunc_call_test_release(p);
230	if (p_st->cnt.refs.counter != 2)
231		return 8;
232
233	p = bpf_kptr_xchg(&v->ref_ptr, NULL);
234	if (!p)
235		return 9;
236	bpf_kfunc_call_test_release(p);
237	if (p_st->cnt.refs.counter != 1)
238		return 10;
 
239
240	p = bpf_kfunc_call_test_acquire(&arg);
241	if (!p)
242		return 11;
 
243	p = bpf_kptr_xchg(&v->ref_ptr, p);
244	if (p) {
245		ret = 12;
246		goto end;
247	}
248	if (p_st->cnt.refs.counter != 2)
249		return 13;
250	/* Leave in map */
251
252	return 0;
253end:
 
254	bpf_kfunc_call_test_release(p);
255	return ret;
256}
257
258SEC("tc")
259int test_map_kptr_ref2(struct __sk_buff *ctx)
260{
261	struct prog_test_ref_kfunc *p, *p_st;
262	struct map_value *v;
263	int key = 0;
264
265	v = bpf_map_lookup_elem(&array_map, &key);
266	if (!v)
267		return 1;
268
269	p_st = v->ref_ptr;
270	if (!p_st || p_st->cnt.refs.counter != 2)
271		return 2;
272
273	p = bpf_kptr_xchg(&v->ref_ptr, NULL);
274	if (!p)
 
 
 
275		return 3;
276	if (p_st->cnt.refs.counter != 2) {
277		bpf_kfunc_call_test_release(p);
278		return 4;
279	}
280
281	p = bpf_kptr_xchg(&v->ref_ptr, p);
282	if (p) {
283		bpf_kfunc_call_test_release(p);
 
 
 
284		return 5;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
285	}
286	if (p_st->cnt.refs.counter != 2)
287		return 6;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
288
289	return 0;
 
 
 
 
 
 
 
 
290}
291
292char _license[] SEC("license") = "GPL";