// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

#define LOOP_BOUND 0xf
#define MAX_ENTRIES 8
#define HALF_ENTRIES (MAX_ENTRIES >> 1)

_Static_assert(MAX_ENTRIES < LOOP_BOUND, "MAX_ENTRIES must be < LOOP_BOUND");

enum bpf_map_type g_map_type = BPF_MAP_TYPE_UNSPEC;
__u32 g_line = 0;
int page_size = 0; /* userspace should set it */

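/*
 * Test plumbing: VERIFY_TYPE() records which map type is under test and
 * aborts the program if that type's check fails; VERIFY() records the
 * source line of the first failed assertion in g_line. Both globals sit in
 * the object's data sections, where the userspace half of the test can
 * read them to report exactly which check failed.
 */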
#define VERIFY_TYPE(type, func) ({	\
	g_map_type = type;		\
	if (!func())			\
		return 0;		\
})


#define VERIFY(expr) ({		\
	g_line = __LINE__;	\
	if (!(expr))		\
		return 0;	\
})

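/*
 * Minimal mirror of the kernel's internal struct bpf_map (and, below, of
 * struct bpf_htab, struct bpf_array, and friends). preserve_access_index
 * makes clang emit CO-RE relocations for every field access, so libbpf
 * rewrites the offsets at load time against the running kernel's BTF; only
 * the fields actually read here need to be declared, and only their names
 * and types must match the kernel's definitions.
 */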
struct bpf_map {
	enum bpf_map_type map_type;
	__u32 key_size;
	__u32 value_size;
	__u32 max_entries;
	__u32 id;
} __attribute__((preserve_access_index));

static inline int check_bpf_map_fields(struct bpf_map *map, __u32 key_size,
				       __u32 value_size, __u32 max_entries)
{
	VERIFY(map->map_type == g_map_type);
	VERIFY(map->key_size == key_size);
	VERIFY(map->value_size == value_size);
	VERIFY(map->max_entries == max_entries);
	VERIFY(map->id > 0);

	return 1;
}

static inline int check_bpf_map_ptr(struct bpf_map *indirect,
				    struct bpf_map *direct)
{
	VERIFY(indirect->map_type == direct->map_type);
	VERIFY(indirect->key_size == direct->key_size);
	VERIFY(indirect->value_size == direct->value_size);
	VERIFY(indirect->max_entries == direct->max_entries);
	VERIFY(indirect->id == direct->id);

	return 1;
}

static inline int check(struct bpf_map *indirect, struct bpf_map *direct,
			__u32 key_size, __u32 value_size, __u32 max_entries)
{
	VERIFY(check_bpf_map_ptr(indirect, direct));
	VERIFY(check_bpf_map_fields(indirect, key_size, value_size,
				    max_entries));
	return 1;
}

static inline int check_default(struct bpf_map *indirect,
				struct bpf_map *direct)
{
	VERIFY(check(indirect, direct, sizeof(__u32), sizeof(__u32),
		     MAX_ENTRIES));
	return 1;
}

static __noinline int
check_default_noinline(struct bpf_map *indirect, struct bpf_map *direct)
{
	VERIFY(check(indirect, direct, sizeof(__u32), sizeof(__u32),
		     MAX_ENTRIES));
	return 1;
}

typedef struct {
	int counter;
} atomic_t;

struct bpf_htab {
	struct bpf_map map;
	atomic_t count;
	__u32 n_buckets;
	__u32 elem_size;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(map_flags, BPF_F_NO_PREALLOC); /* to test bpf_htab.count */
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_hash SEC(".maps");

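/*
 * bpf_map_sum_elem_count() is a kernel kfunc; declaring it __ksym lets
 * libbpf resolve the call against kernel BTF at load time. It sums the
 * map's per-CPU element counters, so it should agree with the value of
 * bpf_htab.count checked below.
 */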
__s64 bpf_map_sum_elem_count(struct bpf_map *map) __ksym;

static inline int check_hash(void)
{
	struct bpf_htab *hash = (struct bpf_htab *)&m_hash;
	struct bpf_map *map = (struct bpf_map *)&m_hash;
	int i;

	VERIFY(check_default_noinline(&hash->map, map));

	VERIFY(hash->n_buckets == MAX_ENTRIES);
	VERIFY(hash->elem_size == 64);

	VERIFY(hash->count.counter == 0);
	VERIFY(bpf_map_sum_elem_count(map) == 0);

	for (i = 0; i < HALF_ENTRIES; ++i) {
		const __u32 key = i;
		const __u32 val = 1;

		if (bpf_map_update_elem(hash, &key, &val, 0))
			return 0;
	}
	VERIFY(hash->count.counter == HALF_ENTRIES);
	VERIFY(bpf_map_sum_elem_count(map) == HALF_ENTRIES);

	return 1;
}

struct bpf_array {
	struct bpf_map map;
	__u32 elem_size;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_array SEC(".maps");

static inline int check_array(void)
{
	struct bpf_array *array = (struct bpf_array *)&m_array;
	struct bpf_map *map = (struct bpf_map *)&m_array;
	int i, n_lookups = 0, n_keys = 0;

	VERIFY(check_default(&array->map, map));

	VERIFY(array->elem_size == 8);

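	/*
	 * Cap the loop with the constant LOOP_BOUND so the verifier can
	 * prove termination; the _Static_assert at the top of the file
	 * guarantees the cap still lets all MAX_ENTRIES slots be visited.
	 */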
	for (i = 0; i < array->map.max_entries && i < LOOP_BOUND; ++i) {
		const __u32 key = i;
		__u32 *val = bpf_map_lookup_elem(array, &key);

		++n_lookups;
		if (val)
			++n_keys;
	}

	VERIFY(n_lookups == MAX_ENTRIES);
	VERIFY(n_keys == MAX_ENTRIES);

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_prog_array SEC(".maps");

static inline int check_prog_array(void)
{
	struct bpf_array *prog_array = (struct bpf_array *)&m_prog_array;
	struct bpf_map *map = (struct bpf_map *)&m_prog_array;

	VERIFY(check_default(&prog_array->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_perf_event_array SEC(".maps");

static inline int check_perf_event_array(void)
{
	struct bpf_array *perf_event_array = (struct bpf_array *)&m_perf_event_array;
	struct bpf_map *map = (struct bpf_map *)&m_perf_event_array;

	VERIFY(check_default(&perf_event_array->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_percpu_hash SEC(".maps");

static inline int check_percpu_hash(void)
{
	struct bpf_htab *percpu_hash = (struct bpf_htab *)&m_percpu_hash;
	struct bpf_map *map = (struct bpf_map *)&m_percpu_hash;

	VERIFY(check_default(&percpu_hash->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_percpu_array SEC(".maps");

static inline int check_percpu_array(void)
{
	struct bpf_array *percpu_array = (struct bpf_array *)&m_percpu_array;
	struct bpf_map *map = (struct bpf_map *)&m_percpu_array;

	VERIFY(check_default(&percpu_array->map, map));

	return 1;
}

struct bpf_stack_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_STACK_TRACE);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u64);
} m_stack_trace SEC(".maps");

static inline int check_stack_trace(void)
{
	struct bpf_stack_map *stack_trace =
		(struct bpf_stack_map *)&m_stack_trace;
	struct bpf_map *map = (struct bpf_map *)&m_stack_trace;

	VERIFY(check(&stack_trace->map, map, sizeof(__u32), sizeof(__u64),
		     MAX_ENTRIES));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_CGROUP_ARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_cgroup_array SEC(".maps");

static inline int check_cgroup_array(void)
{
	struct bpf_array *cgroup_array = (struct bpf_array *)&m_cgroup_array;
	struct bpf_map *map = (struct bpf_map *)&m_cgroup_array;

	VERIFY(check_default(&cgroup_array->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_LRU_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_lru_hash SEC(".maps");

static inline int check_lru_hash(void)
{
	struct bpf_htab *lru_hash = (struct bpf_htab *)&m_lru_hash;
	struct bpf_map *map = (struct bpf_map *)&m_lru_hash;

	VERIFY(check_default(&lru_hash->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_LRU_PERCPU_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_lru_percpu_hash SEC(".maps");

static inline int check_lru_percpu_hash(void)
{
	struct bpf_htab *lru_percpu_hash = (struct bpf_htab *)&m_lru_percpu_hash;
	struct bpf_map *map = (struct bpf_map *)&m_lru_percpu_hash;

	VERIFY(check_default(&lru_percpu_hash->map, map));

	return 1;
}

struct lpm_trie {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct lpm_key {
	struct bpf_lpm_trie_key_hdr trie_key;
	__u32 data;
};

struct {
	__uint(type, BPF_MAP_TYPE_LPM_TRIE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, struct lpm_key);
	__type(value, __u32);
} m_lpm_trie SEC(".maps");

static inline int check_lpm_trie(void)
{
	struct lpm_trie *lpm_trie = (struct lpm_trie *)&m_lpm_trie;
	struct bpf_map *map = (struct bpf_map *)&m_lpm_trie;

	VERIFY(check(&lpm_trie->map, map, sizeof(struct lpm_key), sizeof(__u32),
		     MAX_ENTRIES));

	return 1;
}

#define INNER_MAX_ENTRIES 1234

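/*
 * Map-in-map: the inner map is a separate object so libbpf can create it
 * first and wire its fd into the outer map. __array(values, ...) declares
 * the outer map's value type, and the initializers below pre-populate
 * slot 0 of the array-of-maps and key 2 of the hash-of-maps at load time.
 */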
struct inner_map {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, INNER_MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} inner_map SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
	__array(values, struct {
		__uint(type, BPF_MAP_TYPE_ARRAY);
		__uint(max_entries, INNER_MAX_ENTRIES);
		__type(key, __u32);
		__type(value, __u32);
	});
} m_array_of_maps SEC(".maps") = {
	.values = { (void *)&inner_map, 0, 0, 0, 0, 0, 0, 0, 0 },
};

static inline int check_array_of_maps(void)
{
	struct bpf_array *array_of_maps = (struct bpf_array *)&m_array_of_maps;
	struct bpf_map *map = (struct bpf_map *)&m_array_of_maps;
	struct bpf_array *inner_map;
	int key = 0;

	VERIFY(check_default(&array_of_maps->map, map));
	inner_map = bpf_map_lookup_elem(array_of_maps, &key);
	VERIFY(inner_map != NULL);
	VERIFY(inner_map->map.max_entries == INNER_MAX_ENTRIES);

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_HASH_OF_MAPS);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
	__array(values, struct inner_map);
} m_hash_of_maps SEC(".maps") = {
	.values = {
		[2] = &inner_map,
	},
};

static inline int check_hash_of_maps(void)
{
	struct bpf_htab *hash_of_maps = (struct bpf_htab *)&m_hash_of_maps;
	struct bpf_map *map = (struct bpf_map *)&m_hash_of_maps;
	struct bpf_htab *inner_map;
	int key = 2;

	VERIFY(check_default(&hash_of_maps->map, map));
	inner_map = bpf_map_lookup_elem(hash_of_maps, &key);
	VERIFY(inner_map != NULL);
	VERIFY(inner_map->map.max_entries == INNER_MAX_ENTRIES);

	return 1;
}

struct bpf_dtab {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_devmap SEC(".maps");

static inline int check_devmap(void)
{
	struct bpf_dtab *devmap = (struct bpf_dtab *)&m_devmap;
	struct bpf_map *map = (struct bpf_map *)&m_devmap;

	VERIFY(check_default(&devmap->map, map));

	return 1;
}

struct bpf_stab {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_SOCKMAP);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_sockmap SEC(".maps");

static inline int check_sockmap(void)
{
	struct bpf_stab *sockmap = (struct bpf_stab *)&m_sockmap;
	struct bpf_map *map = (struct bpf_map *)&m_sockmap;

	VERIFY(check_default(&sockmap->map, map));

	return 1;
}

struct bpf_cpu_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_CPUMAP);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_cpumap SEC(".maps");

static inline int check_cpumap(void)
{
	struct bpf_cpu_map *cpumap = (struct bpf_cpu_map *)&m_cpumap;
	struct bpf_map *map = (struct bpf_map *)&m_cpumap;

	VERIFY(check_default(&cpumap->map, map));

	return 1;
}

struct xsk_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_XSKMAP);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_xskmap SEC(".maps");

static inline int check_xskmap(void)
{
	struct xsk_map *xskmap = (struct xsk_map *)&m_xskmap;
	struct bpf_map *map = (struct bpf_map *)&m_xskmap;

	VERIFY(check_default(&xskmap->map, map));

	return 1;
}

struct bpf_shtab {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_SOCKHASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_sockhash SEC(".maps");

static inline int check_sockhash(void)
{
	struct bpf_shtab *sockhash = (struct bpf_shtab *)&m_sockhash;
	struct bpf_map *map = (struct bpf_map *)&m_sockhash;

	VERIFY(check_default(&sockhash->map, map));

	return 1;
}

struct bpf_cgroup_storage_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_CGROUP_STORAGE);
	__type(key, struct bpf_cgroup_storage_key);
	__type(value, __u32);
} m_cgroup_storage SEC(".maps");

static inline int check_cgroup_storage(void)
{
	struct bpf_cgroup_storage_map *cgroup_storage =
		(struct bpf_cgroup_storage_map *)&m_cgroup_storage;
	struct bpf_map *map = (struct bpf_map *)&m_cgroup_storage;

	VERIFY(check(&cgroup_storage->map, map,
		     sizeof(struct bpf_cgroup_storage_key), sizeof(__u32), 0));

	return 1;
}

struct reuseport_array {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_REUSEPORT_SOCKARRAY);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_reuseport_sockarray SEC(".maps");

static inline int check_reuseport_sockarray(void)
{
	struct reuseport_array *reuseport_sockarray =
		(struct reuseport_array *)&m_reuseport_sockarray;
	struct bpf_map *map = (struct bpf_map *)&m_reuseport_sockarray;

	VERIFY(check_default(&reuseport_sockarray->map, map));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE);
	__type(key, struct bpf_cgroup_storage_key);
	__type(value, __u32);
} m_percpu_cgroup_storage SEC(".maps");

static inline int check_percpu_cgroup_storage(void)
{
	struct bpf_cgroup_storage_map *percpu_cgroup_storage =
		(struct bpf_cgroup_storage_map *)&m_percpu_cgroup_storage;
	struct bpf_map *map = (struct bpf_map *)&m_percpu_cgroup_storage;

	VERIFY(check(&percpu_cgroup_storage->map, map,
		     sizeof(struct bpf_cgroup_storage_key), sizeof(__u32), 0));

	return 1;
}

struct bpf_queue_stack {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_QUEUE);
	__uint(max_entries, MAX_ENTRIES);
	__type(value, __u32);
} m_queue SEC(".maps");

static inline int check_queue(void)
{
	struct bpf_queue_stack *queue = (struct bpf_queue_stack *)&m_queue;
	struct bpf_map *map = (struct bpf_map *)&m_queue;

	VERIFY(check(&queue->map, map, 0, sizeof(__u32), MAX_ENTRIES));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_STACK);
	__uint(max_entries, MAX_ENTRIES);
	__type(value, __u32);
} m_stack SEC(".maps");

static inline int check_stack(void)
{
	struct bpf_queue_stack *stack = (struct bpf_queue_stack *)&m_stack;
	struct bpf_map *map = (struct bpf_map *)&m_stack;

	VERIFY(check(&stack->map, map, 0, sizeof(__u32), MAX_ENTRIES));

	return 1;
}

struct bpf_local_storage_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, __u32);
	__type(value, __u32);
} m_sk_storage SEC(".maps");

static inline int check_sk_storage(void)
{
	struct bpf_local_storage_map *sk_storage =
		(struct bpf_local_storage_map *)&m_sk_storage;
	struct bpf_map *map = (struct bpf_map *)&m_sk_storage;

	VERIFY(check(&sk_storage->map, map, sizeof(__u32), sizeof(__u32), 0));

	return 1;
}

struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP_HASH);
	__uint(max_entries, MAX_ENTRIES);
	__type(key, __u32);
	__type(value, __u32);
} m_devmap_hash SEC(".maps");

static inline int check_devmap_hash(void)
{
	struct bpf_dtab *devmap_hash = (struct bpf_dtab *)&m_devmap_hash;
	struct bpf_map *map = (struct bpf_map *)&m_devmap_hash;

	VERIFY(check_default(&devmap_hash->map, map));

	return 1;
}

struct bpf_ringbuf_map {
	struct bpf_map map;
} __attribute__((preserve_access_index));

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
} m_ringbuf SEC(".maps");

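/*
 * A ring buffer's max_entries is its size in bytes and must be a power of
 * two multiple of the page size; the userspace side is expected to size
 * this map to one page and set the page_size global to match before
 * loading, so the comparison below holds.
 */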
static inline int check_ringbuf(void)
{
	struct bpf_ringbuf_map *ringbuf = (struct bpf_ringbuf_map *)&m_ringbuf;
	struct bpf_map *map = (struct bpf_map *)&m_ringbuf;

	VERIFY(check(&ringbuf->map, map, 0, 0, page_size));

	return 1;
}

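/*
 * Attached at cgroup egress purely as a convenient trigger: any packet
 * sent from the attached cgroup runs every per-type check. Returning 1
 * lets traffic pass; returning 0 means a check failed, with g_map_type
 * and g_line identifying which one.
 */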
SEC("cgroup_skb/egress")
int cg_skb(void *ctx)
{
	VERIFY_TYPE(BPF_MAP_TYPE_HASH, check_hash);
	VERIFY_TYPE(BPF_MAP_TYPE_ARRAY, check_array);
	VERIFY_TYPE(BPF_MAP_TYPE_PROG_ARRAY, check_prog_array);
	VERIFY_TYPE(BPF_MAP_TYPE_PERF_EVENT_ARRAY, check_perf_event_array);
	VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_HASH, check_percpu_hash);
	VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, check_percpu_array);
	VERIFY_TYPE(BPF_MAP_TYPE_STACK_TRACE, check_stack_trace);
	VERIFY_TYPE(BPF_MAP_TYPE_CGROUP_ARRAY, check_cgroup_array);
	VERIFY_TYPE(BPF_MAP_TYPE_LRU_HASH, check_lru_hash);
	VERIFY_TYPE(BPF_MAP_TYPE_LRU_PERCPU_HASH, check_lru_percpu_hash);
	VERIFY_TYPE(BPF_MAP_TYPE_LPM_TRIE, check_lpm_trie);
	VERIFY_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, check_array_of_maps);
	VERIFY_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, check_hash_of_maps);
	VERIFY_TYPE(BPF_MAP_TYPE_DEVMAP, check_devmap);
	VERIFY_TYPE(BPF_MAP_TYPE_SOCKMAP, check_sockmap);
	VERIFY_TYPE(BPF_MAP_TYPE_CPUMAP, check_cpumap);
	VERIFY_TYPE(BPF_MAP_TYPE_XSKMAP, check_xskmap);
	VERIFY_TYPE(BPF_MAP_TYPE_SOCKHASH, check_sockhash);
	VERIFY_TYPE(BPF_MAP_TYPE_CGROUP_STORAGE, check_cgroup_storage);
	VERIFY_TYPE(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
		    check_reuseport_sockarray);
	VERIFY_TYPE(BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE,
		    check_percpu_cgroup_storage);
	VERIFY_TYPE(BPF_MAP_TYPE_QUEUE, check_queue);
	VERIFY_TYPE(BPF_MAP_TYPE_STACK, check_stack);
	VERIFY_TYPE(BPF_MAP_TYPE_SK_STORAGE, check_sk_storage);
	VERIFY_TYPE(BPF_MAP_TYPE_DEVMAP_HASH, check_devmap_hash);
	VERIFY_TYPE(BPF_MAP_TYPE_RINGBUF, check_ringbuf);

	return 1;
}

char _license[] SEC("license") = "GPL";