// SPDX-License-Identifier: GPL-2.0
/*
 * bpf_kwork_top.c
 *
 * Copyright (c) 2022 Huawei Inc, Yang Jihong <yangjihong1@huawei.com>
 */

#include <time.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

#include <linux/time64.h>

#include "util/debug.h"
#include "util/evsel.h"
#include "util/kwork.h"

#include <bpf/bpf.h>
#include <perf/cpumap.h>

#include "util/bpf_skel/kwork_top.skel.h"

/*
 * This should be in sync with "util/kwork_top.bpf.c"
 */
#define MAX_COMMAND_LEN 16

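/*
 * Layouts of the map keys and values shared with the BPF side; they must
 * match the definitions in "util/bpf_skel/kwork_top.bpf.c".
 */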
struct time_data {
	__u64 timestamp;
};

struct work_data {
	__u64 runtime;
};

struct task_data {
	__u32 tgid;
	__u32 is_kthread;
	char comm[MAX_COMMAND_LEN];
};

struct work_key {
	__u32 type;
	__u32 pid;
	__u64 task_p;
};

struct task_key {
	__u32 pid;
	__u32 cpu;
};

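/*
 * Glue between a generic kwork_class and the hook that enables the
 * class's BPF programs before the skeleton is loaded.
 */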
struct kwork_class_bpf {
	struct kwork_class *class;
	void (*load_prepare)(void);
};

static struct kwork_top_bpf *skel;

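/* Record the sampling start time and let the BPF programs start accounting. */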
void perf_kwork__top_start(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	skel->bss->from_timestamp = (u64)ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
	skel->bss->enabled = 1;
	pr_debug("perf kwork top start at: %lld\n", skel->bss->from_timestamp);
}

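/* Stop BPF-side accounting and record the sampling end time. */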
void perf_kwork__top_finish(void)
{
	struct timespec ts;

	skel->bss->enabled = 0;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	skel->bss->to_timestamp = (u64)ts.tv_sec * NSEC_PER_SEC + ts.tv_nsec;
	pr_debug("perf kwork top finish at: %lld\n", skel->bss->to_timestamp);
}

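/*
 * Each *_load_prepare() callback enables autoload only for the BPF
 * programs that trace its kwork class.
 */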
static void irq_load_prepare(void)
{
	bpf_program__set_autoload(skel->progs.on_irq_handler_entry, true);
	bpf_program__set_autoload(skel->progs.on_irq_handler_exit, true);
}

static struct kwork_class_bpf kwork_irq_bpf = {
	.load_prepare = irq_load_prepare,
};

static void softirq_load_prepare(void)
{
	bpf_program__set_autoload(skel->progs.on_softirq_entry, true);
	bpf_program__set_autoload(skel->progs.on_softirq_exit, true);
}

static struct kwork_class_bpf kwork_softirq_bpf = {
	.load_prepare = softirq_load_prepare,
};

static void sched_load_prepare(void)
{
	bpf_program__set_autoload(skel->progs.on_switch, true);
}

static struct kwork_class_bpf kwork_sched_bpf = {
	.load_prepare = sched_load_prepare,
};

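/* Classes supported by the BPF-based top, indexed by kwork_class_type. */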
static struct kwork_class_bpf *
kwork_class_bpf_supported_list[KWORK_CLASS_MAX] = {
	[KWORK_CLASS_IRQ] = &kwork_irq_bpf,
	[KWORK_CLASS_SOFTIRQ] = &kwork_softirq_bpf,
	[KWORK_CLASS_SCHED] = &kwork_sched_bpf,
};

static bool valid_kwork_class_type(enum kwork_class_type type)
{
	return type >= 0 && type < KWORK_CLASS_MAX;
}

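/*
 * Mark the CPUs requested via kwork->cpu_list in the BPF-side CPU filter
 * map so that only events on those CPUs are accounted.
 */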
static int setup_filters(struct perf_kwork *kwork)
{
	if (kwork->cpu_list) {
		int idx, nr_cpus, fd;
		struct perf_cpu_map *map;
		struct perf_cpu cpu;

		fd = bpf_map__fd(skel->maps.kwork_top_cpu_filter);
		if (fd < 0) {
			pr_debug("Invalid cpu filter fd\n");
			return -1;
		}

		map = perf_cpu_map__new(kwork->cpu_list);
		if (!map) {
			pr_debug("Invalid cpu_list\n");
			return -1;
		}

		nr_cpus = libbpf_num_possible_cpus();
		perf_cpu_map__for_each_cpu(cpu, idx, map) {
			u8 val = 1;

			if (cpu.cpu >= nr_cpus) {
				perf_cpu_map__put(map);
				pr_err("Requested cpu %d too large\n", cpu.cpu);
				return -1;
			}
			bpf_map_update_elem(fd, &cpu.cpu, &val, BPF_ANY);
		}
		perf_cpu_map__put(map);
	}

	return 0;
}

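/*
 * Open the skeleton, enable only the BPF programs required by the
 * configured classes, load it, install the CPU filter and attach.
 */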
int perf_kwork__top_prepare_bpf(struct perf_kwork *kwork)
{
	struct bpf_program *prog;
	struct kwork_class *class;
	struct kwork_class_bpf *class_bpf;
	enum kwork_class_type type;

	skel = kwork_top_bpf__open();
	if (!skel) {
		pr_debug("Failed to open kwork top skeleton\n");
		return -1;
	}

	/*
	 * Set all programs to non-autoload, then enable autoload only for
	 * the programs needed by the configured classes.
	 */
	bpf_object__for_each_program(prog, skel->obj)
		bpf_program__set_autoload(prog, false);

	list_for_each_entry(class, &kwork->class_list, list) {
		type = class->type;
		if (!valid_kwork_class_type(type) ||
		    !kwork_class_bpf_supported_list[type]) {
			pr_err("Unsupported bpf trace class %s\n", class->name);
			goto out;
		}

		class_bpf = kwork_class_bpf_supported_list[type];
		class_bpf->class = class;

		if (class_bpf->load_prepare)
			class_bpf->load_prepare();
	}

	if (kwork->cpu_list)
		skel->rodata->has_cpu_filter = 1;

	if (kwork_top_bpf__load(skel)) {
		pr_debug("Failed to load kwork top skeleton\n");
		goto out;
	}

	if (setup_filters(kwork))
		goto out;

	if (kwork_top_bpf__attach(skel)) {
		pr_debug("Failed to attach kwork top skeleton\n");
		goto out;
	}

	return 0;

out:
	kwork_top_bpf__destroy(skel);
	return -1;
}

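/* Fill in tgid, kthread flag and command name from the BPF task map. */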
static void read_task_info(struct kwork_work *work)
{
	int fd;
	struct task_data data;
	struct task_key key = {
		.pid = work->id,
		.cpu = work->cpu,
	};

	fd = bpf_map__fd(skel->maps.kwork_top_tasks);
	if (fd < 0) {
		pr_debug("Invalid top tasks map fd\n");
		return;
	}

	if (!bpf_map_lookup_elem(fd, &key, &data)) {
		work->tgid = data.tgid;
		work->is_kthread = data.is_kthread;
		work->name = strdup(data.comm);
	}
}
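
/*
 * Turn one per-CPU runtime entry into a kwork_work record and fill in
 * task details via read_task_info().
 */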
static int add_work(struct perf_kwork *kwork, struct work_key *key,
		    struct work_data *data, int cpu)
{
	struct kwork_class_bpf *bpf_trace;
	struct kwork_work *work;
	struct kwork_work tmp = {
		.id = key->pid,
		.cpu = cpu,
		.name = NULL,
	};
	enum kwork_class_type type = key->type;

	if (!valid_kwork_class_type(type)) {
		pr_debug("Invalid class type %d to add work\n", type);
		return -1;
	}

	bpf_trace = kwork_class_bpf_supported_list[type];
	tmp.class = bpf_trace->class;

	work = perf_kwork_add_work(kwork, tmp.class, &tmp);
	if (!work)
		return -1;

	work->total_runtime = data->runtime;
	read_task_info(work);

	return 0;
}

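/*
 * Walk the per-CPU works map and feed every non-zero runtime into the
 * kwork session.
 */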
int perf_kwork__top_read_bpf(struct perf_kwork *kwork)
{
	int i, fd, nr_cpus;
	struct work_data *data;
	struct work_key key, prev;

	fd = bpf_map__fd(skel->maps.kwork_top_works);
	if (fd < 0) {
		pr_debug("Invalid top runtime fd\n");
		return -1;
	}

	nr_cpus = libbpf_num_possible_cpus();
	data = calloc(nr_cpus, sizeof(struct work_data));
	if (!data)
		return -1;

	memset(&prev, 0, sizeof(prev));
	while (!bpf_map_get_next_key(fd, &prev, &key)) {
		if (bpf_map_lookup_elem(fd, &key, data) != 0) {
			pr_debug("Failed to lookup top elem\n");
			free(data);
			return -1;
		}

		for (i = 0; i < nr_cpus; i++) {
			if (data[i].runtime == 0)
				continue;

			if (add_work(kwork, &key, &data[i], i)) {
				free(data);
				return -1;
			}
		}
		prev = key;
	}
	free(data);

	return 0;
}

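/* Detach and free the BPF skeleton. */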
void perf_kwork__top_cleanup_bpf(void)
{
	kwork_top_bpf__destroy(skel);
}