v5.9
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
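
/* Check that a BPF program can read the fields of kernel 'struct bpf_map'
 * (and several map-type-specific wrapper structs) directly through a map
 * pointer, relying on BTF/CO-RE field relocations.
 */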

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define LOOP_BOUND 0xf
#define MAX_ENTRIES 8
#define HALF_ENTRIES (MAX_ENTRIES >> 1)

_Static_assert(MAX_ENTRIES < LOOP_BOUND, "MAX_ENTRIES must be < LOOP_BOUND");

enum bpf_map_type g_map_type = BPF_MAP_TYPE_UNSPEC;
__u32 g_line = 0;

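/* VERIFY_TYPE() runs one map-type-specific checker and aborts the program on
 * failure; VERIFY() bails out of the enclosing function when an expression is
 * false, recording the failing source line in g_line for userspace to report.
 */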
#define VERIFY_TYPE(type, func) ({	\
	g_map_type = type;		\
	if (!func())			\
		return 0;		\
})


#define VERIFY(expr) ({		\
	g_line = __LINE__;	\
	if (!(expr))		\
		return 0;	\
})

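/* Minimal mirrors of kernel-internal structures. The preserve_access_index
 * attribute makes every field access relocatable (CO-RE), so libbpf fixes up
 * the offsets against the running kernel's BTF at load time.
 */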
struct bpf_map_memory {
	__u32 pages;
} __attribute__((preserve_access_index));

struct bpf_map {
	enum bpf_map_type map_type;
	__u32 key_size;
	__u32 value_size;
	__u32 max_entries;
	__u32 id;
	struct bpf_map_memory memory;
} __attribute__((preserve_access_index));

static inline int check_bpf_map_fields(struct bpf_map *map, __u32 key_size,
				       __u32 value_size, __u32 max_entries)
{
	VERIFY(map->map_type == g_map_type);
	VERIFY(map->key_size == key_size);
	VERIFY(map->value_size == value_size);
	VERIFY(map->max_entries == max_entries);
	VERIFY(map->id > 0);
	VERIFY(map->memory.pages > 0);

	return 1;
}

static inline int check_bpf_map_ptr(struct bpf_map *indirect,
				    struct bpf_map *direct)
{
	VERIFY(indirect->map_type == direct->map_type);
	VERIFY(indirect->key_size == direct->key_size);
	VERIFY(indirect->value_size == direct->value_size);
	VERIFY(indirect->max_entries == direct->max_entries);
	VERIFY(indirect->id == direct->id);
	VERIFY(indirect->memory.pages == direct->memory.pages);

	return 1;
}

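/* 'indirect' reaches struct bpf_map through a type-specific wrapper's
 * embedded member, while 'direct' is the raw map address cast to
 * struct bpf_map; both views must observe identical field values.
 */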
static inline int check(struct bpf_map *indirect, struct bpf_map *direct,
			__u32 key_size, __u32 value_size, __u32 max_entries)
{
	VERIFY(check_bpf_map_ptr(indirect, direct));
	VERIFY(check_bpf_map_fields(indirect, key_size, value_size,
				    max_entries));
	return 1;
}

static inline int check_default(struct bpf_map *indirect,
				struct bpf_map *direct)
{
	VERIFY(check(indirect, direct, sizeof(__u32), sizeof(__u32),
		     MAX_ENTRIES));
	return 1;
}

typedef struct {
	int counter;
} atomic_t;

struct bpf_htab {
	struct bpf_map map;
	atomic_t count;
	__u32 n_buckets;
	__u32 elem_size;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(map_flags, BPF_F_NO_PREALLOC); /* to test bpf_htab.count */
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_hash SEC(".maps");

static inline int check_hash(void)
{
	struct bpf_htab *hash = (struct bpf_htab *)&m_hash;
	struct bpf_map *map = (struct bpf_map *)&m_hash;
	int i;

	VERIFY(check_default(&hash->map, map));

	VERIFY(hash->n_buckets == MAX_ENTRIES);
	VERIFY(hash->elem_size == 64);

	VERIFY(hash->count.counter == 0);
	for (i = 0; i < HALF_ENTRIES; ++i) {
		const __u32 key = i;
		const __u32 val = 1;

		if (bpf_map_update_elem(hash, &key, &val, 0))
			return 0;
	}
	VERIFY(hash->count.counter == HALF_ENTRIES);

	return 1;
}

struct bpf_array {
	struct bpf_map map;
	__u32 elem_size;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_array SEC(".maps");

static inline int check_array(void)
{
	struct bpf_array *array = (struct bpf_array *)&m_array;
	struct bpf_map *map = (struct bpf_map *)&m_array;
	int i, n_lookups = 0, n_keys = 0;

	VERIFY(check_default(&array->map, map));

	VERIFY(array->elem_size == 8);

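	/* LOOP_BOUND is a compile-time constant, which keeps this loop
	 * bounded for the verifier even though max_entries is read from the
	 * map at run time.
	 */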
	for (i = 0; i < array->map.max_entries && i < LOOP_BOUND; ++i) {
		const __u32 key = i;
		__u32 *val = bpf_map_lookup_elem(array, &key);

		++n_lookups;
		if (val)
			++n_keys;
	}

	VERIFY(n_lookups == MAX_ENTRIES);
	VERIFY(n_keys == MAX_ENTRIES);

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_prog_array SEC(".maps");

static inline int check_prog_array(void)
{
	struct bpf_array *prog_array = (struct bpf_array *)&m_prog_array;
	struct bpf_map *map = (struct bpf_map *)&m_prog_array;

	VERIFY(check_default(&prog_array->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_perf_event_array SEC(".maps");

static inline int check_perf_event_array(void)
{
	struct bpf_array *perf_event_array = (struct bpf_array *)&m_perf_event_array;
	struct bpf_map *map = (struct bpf_map *)&m_perf_event_array;

	VERIFY(check_default(&perf_event_array->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_percpu_hash SEC(".maps");

static inline int check_percpu_hash(void)
{
	struct bpf_htab *percpu_hash = (struct bpf_htab *)&m_percpu_hash;
	struct bpf_map *map = (struct bpf_map *)&m_percpu_hash;

	VERIFY(check_default(&percpu_hash->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_percpu_array SEC(".maps");

static inline int check_percpu_array(void)
{
	struct bpf_array *percpu_array = (struct bpf_array *)&m_percpu_array;
	struct bpf_map *map = (struct bpf_map *)&m_percpu_array;

	VERIFY(check_default(&percpu_array->map, map));

	return 1;
}

struct bpf_stack_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u64);
} m_stack_trace SEC(".maps");

static inline int check_stack_trace(void)
{
	struct bpf_stack_map *stack_trace =
		(struct bpf_stack_map *)&m_stack_trace;
	struct bpf_map *map = (struct bpf_map *)&m_stack_trace;

	VERIFY(check(&stack_trace->map, map, sizeof(__u32), sizeof(__u64),
		     MAX_ENTRIES));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_CGROUP_ARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_cgroup_array SEC(".maps");

static inline int check_cgroup_array(void)
{
	struct bpf_array *cgroup_array = (struct bpf_array *)&m_cgroup_array;
	struct bpf_map *map = (struct bpf_map *)&m_cgroup_array;

	VERIFY(check_default(&cgroup_array->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_lru_hash SEC(".maps");

static inline int check_lru_hash(void)
{
	struct bpf_htab *lru_hash = (struct bpf_htab *)&m_lru_hash;
	struct bpf_map *map = (struct bpf_map *)&m_lru_hash;

	VERIFY(check_default(&lru_hash->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_lru_percpu_hash SEC(".maps");

static inline int check_lru_percpu_hash(void)
{
	struct bpf_htab *lru_percpu_hash = (struct bpf_htab *)&m_lru_percpu_hash;
	struct bpf_map *map = (struct bpf_map *)&m_lru_percpu_hash;

	VERIFY(check_default(&lru_percpu_hash->map, map));

	return 1;
}

struct lpm_trie {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct lpm_key {
	struct bpf_lpm_trie_key trie_key;
	__u32 data;
};

struct {
	__uint(type, BPF_MAP_TYPE_LPM_TRIE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, struct lpm_key);
	__type(value, __u32);
} m_lpm_trie SEC(".maps");

static inline int check_lpm_trie(void)
{
	struct lpm_trie *lpm_trie = (struct lpm_trie *)&m_lpm_trie;
	struct bpf_map *map = (struct bpf_map *)&m_lpm_trie;

	VERIFY(check(&lpm_trie->map, map, sizeof(struct lpm_key), sizeof(__u32),
		     MAX_ENTRIES));

	return 1;
}

struct inner_map {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, __u32);
	__type(value, __u32);
} inner_map SEC(".maps");

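/* Map-in-map: slot 0 of the outer array is statically initialized to point at
 * inner_map; the remaining slots start out empty.
 */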
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
	__array(values, struct {
		__uint(type, BPF_MAP_TYPE_ARRAY);
		__uint(max_entries, 1);
		__type(key, __u32);
		__type(value, __u32);
	});
} m_array_of_maps SEC(".maps") = {
	.values = { (void *)&inner_map, 0, 0, 0, 0, 0, 0, 0, 0 },
};

static inline int check_array_of_maps(void)
{
	struct bpf_array *array_of_maps = (struct bpf_array *)&m_array_of_maps;
	struct bpf_map *map = (struct bpf_map *)&m_array_of_maps;

	VERIFY(check_default(&array_of_maps->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
	__array(values, struct inner_map);
} m_hash_of_maps SEC(".maps") = {
	.values = {
		[2] = &inner_map,
	},
};

static inline int check_hash_of_maps(void)
{
	struct bpf_htab *hash_of_maps = (struct bpf_htab *)&m_hash_of_maps;
	struct bpf_map *map = (struct bpf_map *)&m_hash_of_maps;

	VERIFY(check_default(&hash_of_maps->map, map));

	return 1;
}

struct bpf_dtab {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_devmap SEC(".maps");

static inline int check_devmap(void)
{
	struct bpf_dtab *devmap = (struct bpf_dtab *)&m_devmap;
	struct bpf_map *map = (struct bpf_map *)&m_devmap;

	VERIFY(check_default(&devmap->map, map));

	return 1;
}

struct bpf_stab {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_SOCKMAP);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_sockmap SEC(".maps");

static inline int check_sockmap(void)
{
	struct bpf_stab *sockmap = (struct bpf_stab *)&m_sockmap;
	struct bpf_map *map = (struct bpf_map *)&m_sockmap;

	VERIFY(check_default(&sockmap->map, map));

	return 1;
}

struct bpf_cpu_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_CPUMAP);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_cpumap SEC(".maps");

static inline int check_cpumap(void)
{
	struct bpf_cpu_map *cpumap = (struct bpf_cpu_map *)&m_cpumap;
	struct bpf_map *map = (struct bpf_map *)&m_cpumap;

	VERIFY(check_default(&cpumap->map, map));

	return 1;
}

struct xsk_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_XSKMAP);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_xskmap SEC(".maps");

static inline int check_xskmap(void)
{
	struct xsk_map *xskmap = (struct xsk_map *)&m_xskmap;
	struct bpf_map *map = (struct bpf_map *)&m_xskmap;

	VERIFY(check_default(&xskmap->map, map));

	return 1;
}

struct bpf_shtab {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_SOCKHASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_sockhash SEC(".maps");

static inline int check_sockhash(void)
{
	struct bpf_shtab *sockhash = (struct bpf_shtab *)&m_sockhash;
	struct bpf_map *map = (struct bpf_map *)&m_sockhash;

	VERIFY(check_default(&sockhash->map, map));

	return 1;
}

struct bpf_cgroup_storage_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
	__type(key, struct bpf_cgroup_storage_key);
	__type(value, __u32);
} m_cgroup_storage SEC(".maps");

static inline int check_cgroup_storage(void)
{
	struct bpf_cgroup_storage_map *cgroup_storage =
		(struct bpf_cgroup_storage_map *)&m_cgroup_storage;
	struct bpf_map *map = (struct bpf_map *)&m_cgroup_storage;

	VERIFY(check(&cgroup_storage->map, map,
		     sizeof(struct bpf_cgroup_storage_key), sizeof(__u32), 0));

	return 1;
}

struct reuseport_array {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_reuseport_sockarray SEC(".maps");

static inline int check_reuseport_sockarray(void)
{
	struct reuseport_array *reuseport_sockarray =
		(struct reuseport_array *)&m_reuseport_sockarray;
	struct bpf_map *map = (struct bpf_map *)&m_reuseport_sockarray;

	VERIFY(check_default(&reuseport_sockarray->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
	__type(key, struct bpf_cgroup_storage_key);
	__type(value, __u32);
} m_percpu_cgroup_storage SEC(".maps");

static inline int check_percpu_cgroup_storage(void)
{
	struct bpf_cgroup_storage_map *percpu_cgroup_storage =
		(struct bpf_cgroup_storage_map *)&m_percpu_cgroup_storage;
	struct bpf_map *map = (struct bpf_map *)&m_percpu_cgroup_storage;

	VERIFY(check(&percpu_cgroup_storage->map, map,
		     sizeof(struct bpf_cgroup_storage_key), sizeof(__u32), 0));

	return 1;
}

struct bpf_queue_stack {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_QUEUE);
	__uint(max_entries, MAX_ENTRIES);
	__type(value, __u32);
} m_queue SEC(".maps");

static inline int check_queue(void)
{
	struct bpf_queue_stack *queue = (struct bpf_queue_stack *)&m_queue;
	struct bpf_map *map = (struct bpf_map *)&m_queue;

	VERIFY(check(&queue->map, map, 0, sizeof(__u32), MAX_ENTRIES));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_STACK);
	__uint(max_entries, MAX_ENTRIES);
	__type(value, __u32);
} m_stack SEC(".maps");

static inline int check_stack(void)
{
	struct bpf_queue_stack *stack = (struct bpf_queue_stack *)&m_stack;
	struct bpf_map *map = (struct bpf_map *)&m_stack;

	VERIFY(check(&stack->map, map, 0, sizeof(__u32), MAX_ENTRIES));

	return 1;
}

struct bpf_sk_storage_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, __u32);
	__type(value, __u32);
} m_sk_storage SEC(".maps");

static inline int check_sk_storage(void)
{
	struct bpf_sk_storage_map *sk_storage =
		(struct bpf_sk_storage_map *)&m_sk_storage;
	struct bpf_map *map = (struct bpf_map *)&m_sk_storage;

	VERIFY(check(&sk_storage->map, map, sizeof(__u32), sizeof(__u32), 0));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_devmap_hash SEC(".maps");

static inline int check_devmap_hash(void)
{
	struct bpf_dtab *devmap_hash = (struct bpf_dtab *)&m_devmap_hash;
	struct bpf_map *map = (struct bpf_map *)&m_devmap_hash;

	VERIFY(check_default(&devmap_hash->map, map));

	return 1;
}

struct bpf_ringbuf_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 1 << 12);
} m_ringbuf SEC(".maps");

static inline int check_ringbuf(void)
{
	struct bpf_ringbuf_map *ringbuf = (struct bpf_ringbuf_map *)&m_ringbuf;
	struct bpf_map *map = (struct bpf_map *)&m_ringbuf;

	VERIFY(check(&ringbuf->map, map, 0, 0, 1 << 12));

	return 1;
}

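/* Entry point: attached to a cgroup's egress hook, so any packet sent from
 * within that cgroup runs every checker above; the first failure returns 0
 * with g_map_type/g_line identifying the culprit.
 */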
SEC("cgroup_skb/egress")
int cg_skb(void *ctx)
{
	VERIFY_TYPE(BPF_MAP_TYPE_HASH, check_hash);
	VERIFY_TYPE(BPF_MAP_TYPE_ARRAY, check_array);
	VERIFY_TYPE(BPF_MAP_TYPE_PROG_ARRAY, check_prog_array);
	VERIFY_TYPE(BPF_MAP_TYPE_PERF_EVENT_ARRAY, check_perf_event_array);
	VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_HASH, check_percpu_hash);
	VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, check_percpu_array);
	VERIFY_TYPE(BPF_MAP_TYPE_STACK_TRACE, check_stack_trace);
	VERIFY_TYPE(BPF_MAP_TYPE_CGROUP_ARRAY, check_cgroup_array);
	VERIFY_TYPE(BPF_MAP_TYPE_LRU_HASH, check_lru_hash);
	VERIFY_TYPE(BPF_MAP_TYPE_LRU_PERCPU_HASH, check_lru_percpu_hash);
	VERIFY_TYPE(BPF_MAP_TYPE_LPM_TRIE, check_lpm_trie);
	VERIFY_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, check_array_of_maps);
	VERIFY_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, check_hash_of_maps);
	VERIFY_TYPE(BPF_MAP_TYPE_DEVMAP, check_devmap);
	VERIFY_TYPE(BPF_MAP_TYPE_SOCKMAP, check_sockmap);
	VERIFY_TYPE(BPF_MAP_TYPE_CPUMAP, check_cpumap);
	VERIFY_TYPE(BPF_MAP_TYPE_XSKMAP, check_xskmap);
	VERIFY_TYPE(BPF_MAP_TYPE_SOCKHASH, check_sockhash);
	VERIFY_TYPE(BPF_MAP_TYPE_CGROUP_STORAGE, check_cgroup_storage);
	VERIFY_TYPE(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
		    check_reuseport_sockarray);
	VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
		    check_percpu_cgroup_storage);
	VERIFY_TYPE(BPF_MAP_TYPE_QUEUE, check_queue);
	VERIFY_TYPE(BPF_MAP_TYPE_STACK, check_stack);
	VERIFY_TYPE(BPF_MAP_TYPE_SK_STORAGE, check_sk_storage);
	VERIFY_TYPE(BPF_MAP_TYPE_DEVMAP_HASH, check_devmap_hash);
	VERIFY_TYPE(BPF_MAP_TYPE_RINGBUF, check_ringbuf);

	return 1;
}

__u32 _version SEC("version") = 1;
char _license[] SEC("license") = "GPL";
v6.2
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
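
/* v6.2 variant of the same test. Notable differences from v5.9: struct
 * bpf_map_memory (and its memory.pages checks) is gone, the ring buffer size
 * comes from the userspace-set page_size global, check_default_noinline() is
 * new, check_{array,hash}_of_maps() also dereference the inner map, the
 * SK_STORAGE cast uses bpf_local_storage_map, and _version is dropped.
 */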

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define LOOP_BOUND 0xf
#define MAX_ENTRIES 8
#define HALF_ENTRIES (MAX_ENTRIES >> 1)

_Static_assert(MAX_ENTRIES < LOOP_BOUND, "MAX_ENTRIES must be < LOOP_BOUND");

enum bpf_map_type g_map_type = BPF_MAP_TYPE_UNSPEC;
__u32 g_line = 0;
int page_size = 0; /* userspace should set it */

#define VERIFY_TYPE(type, func) ({	\
	g_map_type = type;		\
	if (!func())			\
		return 0;		\
})


#define VERIFY(expr) ({		\
	g_line = __LINE__;	\
	if (!(expr))		\
		return 0;	\
})

struct bpf_map {
	enum bpf_map_type map_type;
	__u32 key_size;
	__u32 value_size;
	__u32 max_entries;
	__u32 id;
} __attribute__((preserve_access_index));

static inline int check_bpf_map_fields(struct bpf_map *map, __u32 key_size,
				       __u32 value_size, __u32 max_entries)
{
	VERIFY(map->map_type == g_map_type);
	VERIFY(map->key_size == key_size);
	VERIFY(map->value_size == value_size);
	VERIFY(map->max_entries == max_entries);
	VERIFY(map->id > 0);

	return 1;
}

static inline int check_bpf_map_ptr(struct bpf_map *indirect,
				    struct bpf_map *direct)
{
	VERIFY(indirect->map_type == direct->map_type);
	VERIFY(indirect->key_size == direct->key_size);
	VERIFY(indirect->value_size == direct->value_size);
	VERIFY(indirect->max_entries == direct->max_entries);
	VERIFY(indirect->id == direct->id);

	return 1;
}

static inline int check(struct bpf_map *indirect, struct bpf_map *direct,
			__u32 key_size, __u32 value_size, __u32 max_entries)
{
	VERIFY(check_bpf_map_ptr(indirect, direct));
	VERIFY(check_bpf_map_fields(indirect, key_size, value_size,
				    max_entries));
	return 1;
}

static inline int check_default(struct bpf_map *indirect,
				struct bpf_map *direct)
{
	VERIFY(check(indirect, direct, sizeof(__u32), sizeof(__u32),
		     MAX_ENTRIES));
	return 1;
}

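/* __noinline forces a real BPF-to-BPF call here, presumably so the verifier
 * has to track the map pointer arguments across a function-call boundary
 * instead of seeing everything inlined.
 */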
static __noinline int
check_default_noinline(struct bpf_map *indirect, struct bpf_map *direct)
{
	VERIFY(check(indirect, direct, sizeof(__u32), sizeof(__u32),
		     MAX_ENTRIES));
	return 1;
}

typedef struct {
	int counter;
} atomic_t;

struct bpf_htab {
	struct bpf_map map;
	atomic_t count;
	__u32 n_buckets;
	__u32 elem_size;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(map_flags, BPF_F_NO_PREALLOC); /* to test bpf_htab.count */
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_hash SEC(".maps");

static inline int check_hash(void)
{
	struct bpf_htab *hash = (struct bpf_htab *)&m_hash;
	struct bpf_map *map = (struct bpf_map *)&m_hash;
	int i;

	VERIFY(check_default_noinline(&hash->map, map));

	VERIFY(hash->n_buckets == MAX_ENTRIES);
	VERIFY(hash->elem_size == 64);

	VERIFY(hash->count.counter == 0);
	for (i = 0; i < HALF_ENTRIES; ++i) {
		const __u32 key = i;
		const __u32 val = 1;

		if (bpf_map_update_elem(hash, &key, &val, 0))
			return 0;
	}
	VERIFY(hash->count.counter == HALF_ENTRIES);

	return 1;
}

struct bpf_array {
	struct bpf_map map;
	__u32 elem_size;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_array SEC(".maps");

static inline int check_array(void)
{
	struct bpf_array *array = (struct bpf_array *)&m_array;
	struct bpf_map *map = (struct bpf_map *)&m_array;
	int i, n_lookups = 0, n_keys = 0;

	VERIFY(check_default(&array->map, map));

	VERIFY(array->elem_size == 8);

	for (i = 0; i < array->map.max_entries && i < LOOP_BOUND; ++i) {
		const __u32 key = i;
		__u32 *val = bpf_map_lookup_elem(array, &key);

		++n_lookups;
		if (val)
			++n_keys;
	}

	VERIFY(n_lookups == MAX_ENTRIES);
	VERIFY(n_keys == MAX_ENTRIES);

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_prog_array SEC(".maps");

static inline int check_prog_array(void)
{
	struct bpf_array *prog_array = (struct bpf_array *)&m_prog_array;
	struct bpf_map *map = (struct bpf_map *)&m_prog_array;

	VERIFY(check_default(&prog_array->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_perf_event_array SEC(".maps");

static inline int check_perf_event_array(void)
{
	struct bpf_array *perf_event_array = (struct bpf_array *)&m_perf_event_array;
	struct bpf_map *map = (struct bpf_map *)&m_perf_event_array;

	VERIFY(check_default(&perf_event_array->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_percpu_hash SEC(".maps");

static inline int check_percpu_hash(void)
{
	struct bpf_htab *percpu_hash = (struct bpf_htab *)&m_percpu_hash;
	struct bpf_map *map = (struct bpf_map *)&m_percpu_hash;

	VERIFY(check_default(&percpu_hash->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_percpu_array SEC(".maps");

static inline int check_percpu_array(void)
{
	struct bpf_array *percpu_array = (struct bpf_array *)&m_percpu_array;
	struct bpf_map *map = (struct bpf_map *)&m_percpu_array;

	VERIFY(check_default(&percpu_array->map, map));

	return 1;
}

struct bpf_stack_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u64);
} m_stack_trace SEC(".maps");

static inline int check_stack_trace(void)
{
	struct bpf_stack_map *stack_trace =
		(struct bpf_stack_map *)&m_stack_trace;
	struct bpf_map *map = (struct bpf_map *)&m_stack_trace;

	VERIFY(check(&stack_trace->map, map, sizeof(__u32), sizeof(__u64),
		     MAX_ENTRIES));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_CGROUP_ARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_cgroup_array SEC(".maps");

static inline int check_cgroup_array(void)
{
	struct bpf_array *cgroup_array = (struct bpf_array *)&m_cgroup_array;
	struct bpf_map *map = (struct bpf_map *)&m_cgroup_array;

	VERIFY(check_default(&cgroup_array->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_lru_hash SEC(".maps");

static inline int check_lru_hash(void)
{
	struct bpf_htab *lru_hash = (struct bpf_htab *)&m_lru_hash;
	struct bpf_map *map = (struct bpf_map *)&m_lru_hash;

	VERIFY(check_default(&lru_hash->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_lru_percpu_hash SEC(".maps");

static inline int check_lru_percpu_hash(void)
{
	struct bpf_htab *lru_percpu_hash = (struct bpf_htab *)&m_lru_percpu_hash;
	struct bpf_map *map = (struct bpf_map *)&m_lru_percpu_hash;

	VERIFY(check_default(&lru_percpu_hash->map, map));

	return 1;
}

struct lpm_trie {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct lpm_key {
	struct bpf_lpm_trie_key trie_key;
	__u32 data;
};

struct {
	__uint(type, BPF_MAP_TYPE_LPM_TRIE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, struct lpm_key);
	__type(value, __u32);
} m_lpm_trie SEC(".maps");

static inline int check_lpm_trie(void)
{
	struct lpm_trie *lpm_trie = (struct lpm_trie *)&m_lpm_trie;
	struct bpf_map *map = (struct bpf_map *)&m_lpm_trie;

	VERIFY(check(&lpm_trie->map, map, sizeof(struct lpm_key), sizeof(__u32),
		     MAX_ENTRIES));

	return 1;
}

#define INNER_MAX_ENTRIES 1234

struct inner_map {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, INNER_MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} inner_map SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
	__array(values, struct {
		__uint(type, BPF_MAP_TYPE_ARRAY);
		__uint(max_entries, INNER_MAX_ENTRIES);
		__type(key, __u32);
		__type(value, __u32);
	});
} m_array_of_maps SEC(".maps") = {
	.values = { (void *)&inner_map, 0, 0, 0, 0, 0, 0, 0, 0 },
};

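/* Beyond the default checks, the v6.2 map-in-map checkers also look the inner
 * map up through the outer map and verify that its max_entries survived the
 * round trip.
 */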
static inline int check_array_of_maps(void)
{
	struct bpf_array *array_of_maps = (struct bpf_array *)&m_array_of_maps;
	struct bpf_map *map = (struct bpf_map *)&m_array_of_maps;
	struct bpf_array *inner_map;
	int key = 0;

	VERIFY(check_default(&array_of_maps->map, map));
	inner_map = bpf_map_lookup_elem(array_of_maps, &key);
	VERIFY(inner_map != NULL);
	VERIFY(inner_map->map.max_entries == INNER_MAX_ENTRIES);

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
	__array(values, struct inner_map);
} m_hash_of_maps SEC(".maps") = {
	.values = {
		[2] = &inner_map,
	},
};

static inline int check_hash_of_maps(void)
{
	struct bpf_htab *hash_of_maps = (struct bpf_htab *)&m_hash_of_maps;
	struct bpf_map *map = (struct bpf_map *)&m_hash_of_maps;
	struct bpf_htab *inner_map;
	int key = 2;

	VERIFY(check_default(&hash_of_maps->map, map));
	inner_map = bpf_map_lookup_elem(hash_of_maps, &key);
	VERIFY(inner_map != NULL);
	VERIFY(inner_map->map.max_entries == INNER_MAX_ENTRIES);

	return 1;
}

struct bpf_dtab {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_devmap SEC(".maps");

static inline int check_devmap(void)
{
	struct bpf_dtab *devmap = (struct bpf_dtab *)&m_devmap;
	struct bpf_map *map = (struct bpf_map *)&m_devmap;

	VERIFY(check_default(&devmap->map, map));

	return 1;
}

struct bpf_stab {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_SOCKMAP);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_sockmap SEC(".maps");

static inline int check_sockmap(void)
{
	struct bpf_stab *sockmap = (struct bpf_stab *)&m_sockmap;
	struct bpf_map *map = (struct bpf_map *)&m_sockmap;

	VERIFY(check_default(&sockmap->map, map));

	return 1;
}

struct bpf_cpu_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_CPUMAP);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_cpumap SEC(".maps");

static inline int check_cpumap(void)
{
	struct bpf_cpu_map *cpumap = (struct bpf_cpu_map *)&m_cpumap;
	struct bpf_map *map = (struct bpf_map *)&m_cpumap;

	VERIFY(check_default(&cpumap->map, map));

	return 1;
}

struct xsk_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_XSKMAP);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_xskmap SEC(".maps");

static inline int check_xskmap(void)
{
	struct xsk_map *xskmap = (struct xsk_map *)&m_xskmap;
	struct bpf_map *map = (struct bpf_map *)&m_xskmap;

	VERIFY(check_default(&xskmap->map, map));

	return 1;
}

struct bpf_shtab {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_SOCKHASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_sockhash SEC(".maps");

static inline int check_sockhash(void)
{
	struct bpf_shtab *sockhash = (struct bpf_shtab *)&m_sockhash;
	struct bpf_map *map = (struct bpf_map *)&m_sockhash;

	VERIFY(check_default(&sockhash->map, map));

	return 1;
}

struct bpf_cgroup_storage_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
	__type(key, struct bpf_cgroup_storage_key);
	__type(value, __u32);
} m_cgroup_storage SEC(".maps");

static inline int check_cgroup_storage(void)
{
	struct bpf_cgroup_storage_map *cgroup_storage =
		(struct bpf_cgroup_storage_map *)&m_cgroup_storage;
	struct bpf_map *map = (struct bpf_map *)&m_cgroup_storage;

	VERIFY(check(&cgroup_storage->map, map,
		     sizeof(struct bpf_cgroup_storage_key), sizeof(__u32), 0));

	return 1;
}

struct reuseport_array {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_reuseport_sockarray SEC(".maps");

static inline int check_reuseport_sockarray(void)
{
	struct reuseport_array *reuseport_sockarray =
		(struct reuseport_array *)&m_reuseport_sockarray;
	struct bpf_map *map = (struct bpf_map *)&m_reuseport_sockarray;

	VERIFY(check_default(&reuseport_sockarray->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
	__type(key, struct bpf_cgroup_storage_key);
	__type(value, __u32);
} m_percpu_cgroup_storage SEC(".maps");

static inline int check_percpu_cgroup_storage(void)
{
	struct bpf_cgroup_storage_map *percpu_cgroup_storage =
		(struct bpf_cgroup_storage_map *)&m_percpu_cgroup_storage;
	struct bpf_map *map = (struct bpf_map *)&m_percpu_cgroup_storage;

	VERIFY(check(&percpu_cgroup_storage->map, map,
		     sizeof(struct bpf_cgroup_storage_key), sizeof(__u32), 0));

	return 1;
}

struct bpf_queue_stack {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_QUEUE);
	__uint(max_entries, MAX_ENTRIES);
	__type(value, __u32);
} m_queue SEC(".maps");

static inline int check_queue(void)
{
	struct bpf_queue_stack *queue = (struct bpf_queue_stack *)&m_queue;
	struct bpf_map *map = (struct bpf_map *)&m_queue;

	VERIFY(check(&queue->map, map, 0, sizeof(__u32), MAX_ENTRIES));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_STACK);
	__uint(max_entries, MAX_ENTRIES);
	__type(value, __u32);
} m_stack SEC(".maps");

static inline int check_stack(void)
{
	struct bpf_queue_stack *stack = (struct bpf_queue_stack *)&m_stack;
	struct bpf_map *map = (struct bpf_map *)&m_stack;

	VERIFY(check(&stack->map, map, 0, sizeof(__u32), MAX_ENTRIES));

	return 1;
}

struct bpf_local_storage_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, __u32);
	__type(value, __u32);
} m_sk_storage SEC(".maps");

static inline int check_sk_storage(void)
{
	struct bpf_local_storage_map *sk_storage =
		(struct bpf_local_storage_map *)&m_sk_storage;
	struct bpf_map *map = (struct bpf_map *)&m_sk_storage;

	VERIFY(check(&sk_storage->map, map, sizeof(__u32), sizeof(__u32), 0));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_devmap_hash SEC(".maps");

static inline int check_devmap_hash(void)
{
	struct bpf_dtab *devmap_hash = (struct bpf_dtab *)&m_devmap_hash;
	struct bpf_map *map = (struct bpf_map *)&m_devmap_hash;

	VERIFY(check_default(&devmap_hash->map, map));

	return 1;
}

struct bpf_ringbuf_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

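/* No max_entries here: userspace is expected to resize the ring buffer and
 * set the page_size global to match, since check_ringbuf() verifies
 * max_entries == page_size.
 */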
struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
} m_ringbuf SEC(".maps");

static inline int check_ringbuf(void)
{
	struct bpf_ringbuf_map *ringbuf = (struct bpf_ringbuf_map *)&m_ringbuf;
	struct bpf_map *map = (struct bpf_map *)&m_ringbuf;

	VERIFY(check(&ringbuf->map, map, 0, 0, page_size));

	return 1;
}

SEC("cgroup_skb/egress")
int cg_skb(void *ctx)
{
	VERIFY_TYPE(BPF_MAP_TYPE_HASH, check_hash);
	VERIFY_TYPE(BPF_MAP_TYPE_ARRAY, check_array);
	VERIFY_TYPE(BPF_MAP_TYPE_PROG_ARRAY, check_prog_array);
	VERIFY_TYPE(BPF_MAP_TYPE_PERF_EVENT_ARRAY, check_perf_event_array);
	VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_HASH, check_percpu_hash);
	VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, check_percpu_array);
	VERIFY_TYPE(BPF_MAP_TYPE_STACK_TRACE, check_stack_trace);
	VERIFY_TYPE(BPF_MAP_TYPE_CGROUP_ARRAY, check_cgroup_array);
	VERIFY_TYPE(BPF_MAP_TYPE_LRU_HASH, check_lru_hash);
	VERIFY_TYPE(BPF_MAP_TYPE_LRU_PERCPU_HASH, check_lru_percpu_hash);
	VERIFY_TYPE(BPF_MAP_TYPE_LPM_TRIE, check_lpm_trie);
	VERIFY_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, check_array_of_maps);
	VERIFY_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, check_hash_of_maps);
	VERIFY_TYPE(BPF_MAP_TYPE_DEVMAP, check_devmap);
	VERIFY_TYPE(BPF_MAP_TYPE_SOCKMAP, check_sockmap);
	VERIFY_TYPE(BPF_MAP_TYPE_CPUMAP, check_cpumap);
	VERIFY_TYPE(BPF_MAP_TYPE_XSKMAP, check_xskmap);
	VERIFY_TYPE(BPF_MAP_TYPE_SOCKHASH, check_sockhash);
	VERIFY_TYPE(BPF_MAP_TYPE_CGROUP_STORAGE, check_cgroup_storage);
	VERIFY_TYPE(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
		    check_reuseport_sockarray);
	VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
		    check_percpu_cgroup_storage);
	VERIFY_TYPE(BPF_MAP_TYPE_QUEUE, check_queue);
	VERIFY_TYPE(BPF_MAP_TYPE_STACK, check_stack);
	VERIFY_TYPE(BPF_MAP_TYPE_SK_STORAGE, check_sk_storage);
	VERIFY_TYPE(BPF_MAP_TYPE_DEVMAP_HASH, check_devmap_hash);
	VERIFY_TYPE(BPF_MAP_TYPE_RINGBUF, check_ringbuf);

	return 1;
}

char _license[] SEC("license") = "GPL";