// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (c) 2020 Facebook
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

/*
 * Minimal local mirror of the kernel's struct bpf_perf_event_value.
 * preserve_access_index makes field accesses CO-RE-relocatable, so this
 * program does not depend on the exact kernel struct layout at build time.
 */
struct bpf_perf_event_value___local {
	__u64 counter;
	__u64 enabled;
	__u64 running;
} __attribute__((preserve_access_index));

/*
 * Map of perf event fds, num_cpu * num_metric entries.
 * Values (the fds) are presumably installed from userspace before the
 * programs attach — confirm against the loader. Index layout used by the
 * programs below: metric i on CPU c is at key c + i * num_cpu.
 */
struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(int));
} events SEC(".maps");

/*
 * Per-CPU counter readings captured at fentry, one slot per metric
 * (key = metric index). Consumed by fexit_update_maps() to compute deltas.
 */
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(struct bpf_perf_event_value___local));
} fentry_readings SEC(".maps");

/*
 * Accumulated (fexit - fentry) deltas, per CPU, one slot per metric
 * (key = metric index). Userspace presumably sums these across CPUs
 * to report totals — confirm against the reader.
 */
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(struct bpf_perf_event_value___local));
} accum_readings SEC(".maps");

/*
 * Sample counts, one per CPU: number of fexit invocations whose readings
 * were successfully taken (only slot 0 of this array is used; the per-CPU
 * map itself provides the per-CPU dimension).
 */
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(u64));
} counts SEC(".maps");

/*
 * const volatile globals land in .rodata; presumably overwritten by the
 * userspace loader via the skeleton before load — confirm against loader.
 */
const volatile __u32 num_cpu = 1;
const volatile __u32 num_metric = 1;
/* Compile-time bound on num_metric: sizes stack arrays and bounds loops
 * so the BPF verifier can prove termination and array accesses in range. */
#define MAX_NUM_METRICS 4

/*
 * fentry program: snapshot all perf counters for this CPU at function
 * entry into fentry_readings. "XXX" is a placeholder attach target,
 * presumably rewritten by the userspace loader — confirm.
 */
SEC("fentry/XXX")
int BPF_PROG(fentry_XXX)
{
	struct bpf_perf_event_value___local *ptrs[MAX_NUM_METRICS];
	u32 key = bpf_get_smp_processor_id();
	u32 i;

	/* look up before reading, to reduce error (keeps the two
	 * bpf_perf_event_read_value() calls per metric close together) */
	for (i = 0; i < num_metric && i < MAX_NUM_METRICS; i++) {
		u32 flag = i;

		ptrs[i] = bpf_map_lookup_elem(&fentry_readings, &flag);
		if (!ptrs[i])
			return 0; /* verifier requires the NULL check */
	}

	for (i = 0; i < num_metric && i < MAX_NUM_METRICS; i++) {
		struct bpf_perf_event_value___local reading;
		int err;

		err = bpf_perf_event_read_value(&events, key, (void *)&reading,
						sizeof(reading));
		if (err)
			return 0; /* leave stale slot; fexit gates on counter != 0 */
		*(ptrs[i]) = reading;
		/* events layout: metric i for this CPU is at cpu + i * num_cpu */
		key += num_cpu;
	}

	return 0;
}

/*
 * Fold one metric's fexit reading into accum_readings.
 * @id:    metric index (key into fentry_readings / accum_readings)
 * @after: counter values read at fexit for this metric
 *
 * Computes after - before against the fentry snapshot and adds the delta
 * to the per-CPU accumulator. No-op if there is no valid fentry reading.
 */
static inline void
fexit_update_maps(u32 id, struct bpf_perf_event_value___local *after)
{
	struct bpf_perf_event_value___local *before, diff;

	before = bpf_map_lookup_elem(&fentry_readings, &id);
	/* only account samples with a valid fentry_reading
	 * (counter == 0 means the fentry-side read failed or never ran) */
	if (before && before->counter) {
		struct bpf_perf_event_value___local *accum;

		diff.counter = after->counter - before->counter;
		diff.enabled = after->enabled - before->enabled;
		diff.running = after->running - before->running;

		accum = bpf_map_lookup_elem(&accum_readings, &id);
		if (accum) {
			/* per-CPU map: plain += is safe, no atomics needed */
			accum->counter += diff.counter;
			accum->enabled += diff.enabled;
			accum->running += diff.running;
		}
	}
}

/*
 * fexit program: read all perf counters again, bump the sample count,
 * and accumulate the per-metric deltas against the fentry snapshot.
 * "XXX" is a placeholder attach target, presumably rewritten by the
 * userspace loader — confirm.
 */
SEC("fexit/XXX")
int BPF_PROG(fexit_XXX)
{
	struct bpf_perf_event_value___local readings[MAX_NUM_METRICS];
	u32 cpu = bpf_get_smp_processor_id();
	u32 i, zero = 0;
	int err;
	u64 *count;

	/* read all events before updating the maps, to reduce error */
	for (i = 0; i < num_metric && i < MAX_NUM_METRICS; i++) {
		/* same index layout as fentry: cpu + i * num_cpu */
		err = bpf_perf_event_read_value(&events, cpu + i * num_cpu,
						(void *)(readings + i),
						sizeof(*readings));
		if (err)
			return 0; /* drop the whole sample on any failed read */
	}
	count = bpf_map_lookup_elem(&counts, &zero);
	if (count) {
		*count += 1;
		for (i = 0; i < num_metric && i < MAX_NUM_METRICS; i++)
			fexit_update_maps(i, &readings[i]);
	}
	return 0;
}

/* License string required by the kernel to allow GPL-only helpers. */
char LICENSE[] SEC("license") = "Dual BSD/GPL";
/*
 * NOTE(review): everything below appears to be a second, older copy of the
 * same file concatenated by the extraction (no CO-RE ___local type, macro
 * spelled MAX_NUM_MATRICS). Duplicate symbol names mean the combined text
 * cannot build as one translation unit — keep only one copy.
 */
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
// Copyright (c) 2020 Facebook
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

/*
 * Map of perf event fds, num_cpu * num_metric entries.
 * Values (the fds) are presumably installed from userspace before the
 * programs attach — confirm against the loader. Index layout used below:
 * metric i on CPU c is at key c + i * num_cpu.
 */
struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(int));
} events SEC(".maps");

/*
 * Per-CPU counter readings captured at fentry, one slot per metric
 * (key = metric index). Consumed by fexit_update_maps() to compute deltas.
 */
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(struct bpf_perf_event_value));
} fentry_readings SEC(".maps");

/*
 * Accumulated (fexit - fentry) deltas, per CPU, one slot per metric
 * (key = metric index). Userspace presumably sums these across CPUs —
 * confirm against the reader.
 */
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(struct bpf_perf_event_value));
} accum_readings SEC(".maps");

/*
 * Sample counts, one per CPU: number of fexit invocations whose readings
 * were successfully taken (only slot 0 is used; the per-CPU map itself
 * provides the per-CPU dimension).
 */
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(u64));
} counts SEC(".maps");

/*
 * const volatile globals land in .rodata; presumably overwritten by the
 * userspace loader via the skeleton before load — confirm against loader.
 */
const volatile __u32 num_cpu = 1;
const volatile __u32 num_metric = 1;
/* Compile-time bound on num_metric, used to size stack arrays and bound
 * loops for the verifier. NOTE(review): "MATRICS" is a typo for "METRICS";
 * renaming requires touching every use site below in one pass. */
#define MAX_NUM_MATRICS 4

/*
 * fentry program: snapshot all perf counters for this CPU at function
 * entry into fentry_readings. "XXX" is a placeholder attach target,
 * presumably rewritten by the userspace loader — confirm.
 */
SEC("fentry/XXX")
int BPF_PROG(fentry_XXX)
{
	struct bpf_perf_event_value *ptrs[MAX_NUM_MATRICS];
	u32 key = bpf_get_smp_processor_id();
	u32 i;

	/* look up before reading, to reduce error (keeps the per-metric
	 * counter reads close together) */
	for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++) {
		u32 flag = i;

		ptrs[i] = bpf_map_lookup_elem(&fentry_readings, &flag);
		if (!ptrs[i])
			return 0; /* verifier requires the NULL check */
	}

	for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++) {
		struct bpf_perf_event_value reading;
		int err;

		err = bpf_perf_event_read_value(&events, key, &reading,
						sizeof(reading));
		if (err)
			return 0; /* leave stale slot; fexit gates on counter != 0 */
		*(ptrs[i]) = reading;
		/* events layout: metric i for this CPU is at cpu + i * num_cpu */
		key += num_cpu;
	}

	return 0;
}

/*
 * Fold one metric's fexit reading into accum_readings.
 * @id:    metric index (key into fentry_readings / accum_readings)
 * @after: counter values read at fexit for this metric
 *
 * Computes after - before against the fentry snapshot and adds the delta
 * to the per-CPU accumulator. No-op if there is no valid fentry reading.
 */
static inline void
fexit_update_maps(u32 id, struct bpf_perf_event_value *after)
{
	struct bpf_perf_event_value *before, diff;

	before = bpf_map_lookup_elem(&fentry_readings, &id);
	/* only account samples with a valid fentry_reading
	 * (counter == 0 means the fentry-side read failed or never ran) */
	if (before && before->counter) {
		struct bpf_perf_event_value *accum;

		diff.counter = after->counter - before->counter;
		diff.enabled = after->enabled - before->enabled;
		diff.running = after->running - before->running;

		accum = bpf_map_lookup_elem(&accum_readings, &id);
		if (accum) {
			/* per-CPU map: plain += is safe, no atomics needed */
			accum->counter += diff.counter;
			accum->enabled += diff.enabled;
			accum->running += diff.running;
		}
	}
}

/*
 * fexit program: read all perf counters again, bump the sample count,
 * and accumulate the per-metric deltas against the fentry snapshot.
 * "XXX" is a placeholder attach target, presumably rewritten by the
 * userspace loader — confirm.
 */
SEC("fexit/XXX")
int BPF_PROG(fexit_XXX)
{
	struct bpf_perf_event_value readings[MAX_NUM_MATRICS];
	u32 cpu = bpf_get_smp_processor_id();
	u32 i, zero = 0;
	int err;
	u64 *count;

	/* read all events before updating the maps, to reduce error */
	for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++) {
		/* same index layout as fentry: cpu + i * num_cpu */
		err = bpf_perf_event_read_value(&events, cpu + i * num_cpu,
						readings + i, sizeof(*readings));
		if (err)
			return 0; /* drop the whole sample on any failed read */
	}
	count = bpf_map_lookup_elem(&counts, &zero);
	if (count) {
		*count += 1;
		for (i = 0; i < num_metric && i < MAX_NUM_MATRICS; i++)
			fexit_update_maps(i, &readings[i]);
	}
	return 0;
}

/* License string required by the kernel to allow GPL-only helpers. */
char LICENSE[] SEC("license") = "Dual BSD/GPL";