1// SPDX-License-Identifier: GPL-2.0
2/*
3 * bpf-loader.c
4 *
5 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
6 * Copyright (C) 2015 Huawei Inc.
7 */
8
9#include <linux/bpf.h>
10#include <bpf/libbpf.h>
11#include <bpf/bpf.h>
12#include <linux/err.h>
13#include <linux/kernel.h>
14#include <linux/string.h>
15#include <errno.h>
16#include "perf.h"
17#include "debug.h"
18#include "bpf-loader.h"
19#include "bpf-prologue.h"
20#include "probe-event.h"
21#include "probe-finder.h" // for MAX_PROBES
22#include "parse-events.h"
23#include "strfilter.h"
24#include "llvm-utils.h"
25#include "c++/clang-c.h"
26
27#define DEFINE_PRINT_FN(name, level) \
28static int libbpf_##name(const char *fmt, ...) \
29{ \
30 va_list args; \
31 int ret; \
32 \
33 va_start(args, fmt); \
34 ret = veprintf(level, verbose, pr_fmt(fmt), args);\
35 va_end(args); \
36 return ret; \
37}
38
39DEFINE_PRINT_FN(warning, 1)
40DEFINE_PRINT_FN(info, 1)
41DEFINE_PRINT_FN(debug, 1)
42
43struct bpf_prog_priv {
44 bool is_tp;
45 char *sys_name;
46 char *evt_name;
47 struct perf_probe_event pev;
48 bool need_prologue;
49 struct bpf_insn *insns_buf;
50 int nr_types;
51 int *type_mapping;
52};
53
54static bool libbpf_initialized;
55
56struct bpf_object *
57bpf__prepare_load_buffer(void *obj_buf, size_t obj_buf_sz, const char *name)
58{
59 struct bpf_object *obj;
60
61 if (!libbpf_initialized) {
62 libbpf_set_print(libbpf_warning,
63 libbpf_info,
64 libbpf_debug);
65 libbpf_initialized = true;
66 }
67
68 obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, name);
69 if (IS_ERR_OR_NULL(obj)) {
70 pr_debug("bpf: failed to load buffer\n");
71 return ERR_PTR(-EINVAL);
72 }
73
74 return obj;
75}
76
77struct bpf_object *bpf__prepare_load(const char *filename, bool source)
78{
79 struct bpf_object *obj;
80
81 if (!libbpf_initialized) {
82 libbpf_set_print(libbpf_warning,
83 libbpf_info,
84 libbpf_debug);
85 libbpf_initialized = true;
86 }
87
88 if (source) {
89 int err;
90 void *obj_buf;
91 size_t obj_buf_sz;
92
93 perf_clang__init();
94 err = perf_clang__compile_bpf(filename, &obj_buf, &obj_buf_sz);
95 perf_clang__cleanup();
96 if (err) {
97 pr_debug("bpf: builtin compilation failed: %d, try external compiler\n", err);
98 err = llvm__compile_bpf(filename, &obj_buf, &obj_buf_sz);
99 if (err)
100 return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE);
101 } else
102			pr_debug("bpf: successful builtin compilation\n");
103 obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, filename);
104
105 if (!IS_ERR_OR_NULL(obj) && llvm_param.dump_obj)
106 llvm__dump_obj(filename, obj_buf, obj_buf_sz);
107
108 free(obj_buf);
109 } else
110 obj = bpf_object__open(filename);
111
112 if (IS_ERR_OR_NULL(obj)) {
113 pr_debug("bpf: failed to load %s\n", filename);
114 return obj;
115 }
116
117 return obj;
118}
119
120void bpf__clear(void)
121{
122 struct bpf_object *obj, *tmp;
123
124 bpf_object__for_each_safe(obj, tmp) {
125 bpf__unprobe(obj);
126 bpf_object__close(obj);
127 }
128}
129
130static void
131clear_prog_priv(struct bpf_program *prog __maybe_unused,
132 void *_priv)
133{
134 struct bpf_prog_priv *priv = _priv;
135
136 cleanup_perf_probe_events(&priv->pev, 1);
137 zfree(&priv->insns_buf);
138 zfree(&priv->type_mapping);
139 zfree(&priv->sys_name);
140 zfree(&priv->evt_name);
141 free(priv);
142}
143
144static int
145prog_config__exec(const char *value, struct perf_probe_event *pev)
146{
147 pev->uprobes = true;
148 pev->target = strdup(value);
149 if (!pev->target)
150 return -ENOMEM;
151 return 0;
152}
153
154static int
155prog_config__module(const char *value, struct perf_probe_event *pev)
156{
157 pev->uprobes = false;
158 pev->target = strdup(value);
159 if (!pev->target)
160 return -ENOMEM;
161 return 0;
162}
163
164static int
165prog_config__bool(const char *value, bool *pbool, bool invert)
166{
167 int err;
168 bool bool_value;
169
170 if (!pbool)
171 return -EINVAL;
172
173 err = strtobool(value, &bool_value);
174 if (err)
175 return err;
176
177 *pbool = invert ? !bool_value : bool_value;
178 return 0;
179}
180
181static int
182prog_config__inlines(const char *value,
183 struct perf_probe_event *pev __maybe_unused)
184{
185 return prog_config__bool(value, &probe_conf.no_inlines, true);
186}
187
188static int
189prog_config__force(const char *value,
190 struct perf_probe_event *pev __maybe_unused)
191{
192 return prog_config__bool(value, &probe_conf.force_add, false);
193}
194
195static struct {
196 const char *key;
197 const char *usage;
198 const char *desc;
199 int (*func)(const char *, struct perf_probe_event *);
200} bpf_prog_config_terms[] = {
201 {
202 .key = "exec",
203 .usage = "exec=<full path of file>",
204 .desc = "Set uprobe target",
205 .func = prog_config__exec,
206 },
207 {
208 .key = "module",
209 .usage = "module=<module name> ",
210 .desc = "Set kprobe module",
211 .func = prog_config__module,
212 },
213 {
214 .key = "inlines",
215 .usage = "inlines=[yes|no] ",
216 .desc = "Probe at inline symbol",
217 .func = prog_config__inlines,
218 },
219 {
220 .key = "force",
221 .usage = "force=[yes|no] ",
222 .desc = "Forcibly add events with existing name",
223 .func = prog_config__force,
224 },
225};
226
227static int
228do_prog_config(const char *key, const char *value,
229 struct perf_probe_event *pev)
230{
231 unsigned int i;
232
233 pr_debug("config bpf program: %s=%s\n", key, value);
234 for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++)
235 if (strcmp(key, bpf_prog_config_terms[i].key) == 0)
236 return bpf_prog_config_terms[i].func(value, pev);
237
238 pr_debug("BPF: ERROR: invalid program config option: %s=%s\n",
239 key, value);
240
241 pr_debug("\nHint: Valid options are:\n");
242 for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++)
243 pr_debug("\t%s:\t%s\n", bpf_prog_config_terms[i].usage,
244 bpf_prog_config_terms[i].desc);
245 pr_debug("\n");
246
247 return -BPF_LOADER_ERRNO__PROGCONF_TERM;
248}
249
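
/*
 * Parse the leading "key=value;" pairs of a program's config (section)
 * string, applying each via do_prog_config(), and return a pointer to
 * the remainder (the probe definition or tracepoint name).
 *
 * Illustrative section names (hypothetical, not taken from this file):
 *   "exec=/bin/ls;main"           - uprobe config followed by a probe spec
 *   "syscalls:sys_enter_openat"   - plain tracepoint, no key=value pairs
 */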
250static const char *
251parse_prog_config_kvpair(const char *config_str, struct perf_probe_event *pev)
252{
253 char *text = strdup(config_str);
254 char *sep, *line;
255 const char *main_str = NULL;
256 int err = 0;
257
258 if (!text) {
259 pr_debug("Not enough memory: dup config_str failed\n");
260 return ERR_PTR(-ENOMEM);
261 }
262
263 line = text;
264 while ((sep = strchr(line, ';'))) {
265 char *equ;
266
267 *sep = '\0';
268 equ = strchr(line, '=');
269 if (!equ) {
270 pr_warning("WARNING: invalid config in BPF object: %s\n",
271 line);
272 pr_warning("\tShould be 'key=value'.\n");
273 goto nextline;
274 }
275 *equ = '\0';
276
277 err = do_prog_config(line, equ + 1, pev);
278 if (err)
279 break;
280nextline:
281 line = sep + 1;
282 }
283
284 if (!err)
285 main_str = config_str + (line - text);
286 free(text);
287
288 return err ? ERR_PTR(err) : main_str;
289}
290
291static int
292parse_prog_config(const char *config_str, const char **p_main_str,
293 bool *is_tp, struct perf_probe_event *pev)
294{
295 int err;
296 const char *main_str = parse_prog_config_kvpair(config_str, pev);
297
298 if (IS_ERR(main_str))
299 return PTR_ERR(main_str);
300
301 *p_main_str = main_str;
302 if (!strchr(main_str, '=')) {
303 /* Is a tracepoint event? */
304 const char *s = strchr(main_str, ':');
305
306 if (!s) {
307 pr_debug("bpf: '%s' is not a valid tracepoint\n",
308 config_str);
309 return -BPF_LOADER_ERRNO__CONFIG;
310 }
311
312 *is_tp = true;
313 return 0;
314 }
315
316 *is_tp = false;
317 err = parse_perf_probe_command(main_str, pev);
318 if (err < 0) {
319 pr_debug("bpf: '%s' is not a valid config string\n",
320 config_str);
321		/* parse failed, no need to clear pev. */
322 return -BPF_LOADER_ERRNO__CONFIG;
323 }
324 return 0;
325}
326
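
/*
 * Turn one program's config string into either a tracepoint (sys:event)
 * or a perf_probe_event, and attach the result to the program as private
 * data for the later probe/load steps.
 */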
327static int
328config_bpf_program(struct bpf_program *prog)
329{
330 struct perf_probe_event *pev = NULL;
331 struct bpf_prog_priv *priv = NULL;
332 const char *config_str, *main_str;
333 bool is_tp = false;
334 int err;
335
336 /* Initialize per-program probing setting */
337 probe_conf.no_inlines = false;
338 probe_conf.force_add = false;
339
340 config_str = bpf_program__title(prog, false);
341 if (IS_ERR(config_str)) {
342 pr_debug("bpf: unable to get title for program\n");
343 return PTR_ERR(config_str);
344 }
345
346	priv = calloc(1, sizeof(*priv));
347 if (!priv) {
348 pr_debug("bpf: failed to alloc priv\n");
349 return -ENOMEM;
350 }
351 pev = &priv->pev;
352
353 pr_debug("bpf: config program '%s'\n", config_str);
354 err = parse_prog_config(config_str, &main_str, &is_tp, pev);
355 if (err)
356 goto errout;
357
358 if (is_tp) {
359 char *s = strchr(main_str, ':');
360
361 priv->is_tp = true;
362 priv->sys_name = strndup(main_str, s - main_str);
363 priv->evt_name = strdup(s + 1);
364 goto set_priv;
365 }
366
367 if (pev->group && strcmp(pev->group, PERF_BPF_PROBE_GROUP)) {
368 pr_debug("bpf: '%s': group for event is set and not '%s'.\n",
369 config_str, PERF_BPF_PROBE_GROUP);
370 err = -BPF_LOADER_ERRNO__GROUP;
371 goto errout;
372 } else if (!pev->group)
373 pev->group = strdup(PERF_BPF_PROBE_GROUP);
374
375 if (!pev->group) {
376 pr_debug("bpf: strdup failed\n");
377 err = -ENOMEM;
378 goto errout;
379 }
380
381 if (!pev->event) {
382 pr_debug("bpf: '%s': event name is missing. Section name should be 'key=value'\n",
383 config_str);
384 err = -BPF_LOADER_ERRNO__EVENTNAME;
385 goto errout;
386 }
387 pr_debug("bpf: config '%s' is ok\n", config_str);
388
389set_priv:
390 err = bpf_program__set_priv(prog, priv, clear_prog_priv);
391 if (err) {
392 pr_debug("Failed to set priv for program '%s'\n", config_str);
393 goto errout;
394 }
395
396 return 0;
397
398errout:
399 if (pev)
400 clear_perf_probe_event(pev);
401 free(priv);
402 return err;
403}
404
405static int bpf__prepare_probe(void)
406{
407 static int err = 0;
408 static bool initialized = false;
409
410 /*
411	 * Make err static, so if init failed the first time, bpf__prepare_probe()
412 * fails each time without calling init_probe_symbol_maps multiple
413 * times.
414 */
415 if (initialized)
416 return err;
417
418 initialized = true;
419 err = init_probe_symbol_maps(false);
420 if (err < 0)
421 pr_debug("Failed to init_probe_symbol_maps\n");
422 probe_conf.max_probes = MAX_PROBES;
423 return err;
424}
425
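
/*
 * Pre-processor callback run by libbpf for each prologue type 'n': find a
 * tev of that type, generate its argument-fetching prologue into
 * priv->insns_buf and prepend it to the original instructions.
 */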
426static int
427preproc_gen_prologue(struct bpf_program *prog, int n,
428 struct bpf_insn *orig_insns, int orig_insns_cnt,
429 struct bpf_prog_prep_result *res)
430{
431 struct bpf_prog_priv *priv = bpf_program__priv(prog);
432 struct probe_trace_event *tev;
433 struct perf_probe_event *pev;
434 struct bpf_insn *buf;
435 size_t prologue_cnt = 0;
436 int i, err;
437
438 if (IS_ERR(priv) || !priv || priv->is_tp)
439 goto errout;
440
441 pev = &priv->pev;
442
443 if (n < 0 || n >= priv->nr_types)
444 goto errout;
445
446	/* Find a tev that belongs to that type */
447 for (i = 0; i < pev->ntevs; i++) {
448 if (priv->type_mapping[i] == n)
449 break;
450 }
451
452 if (i >= pev->ntevs) {
453 pr_debug("Internal error: prologue type %d not found\n", n);
454 return -BPF_LOADER_ERRNO__PROLOGUE;
455 }
456
457 tev = &pev->tevs[i];
458
459 buf = priv->insns_buf;
460 err = bpf__gen_prologue(tev->args, tev->nargs,
461 buf, &prologue_cnt,
462 BPF_MAXINSNS - orig_insns_cnt);
463 if (err) {
464 const char *title;
465
466 title = bpf_program__title(prog, false);
467 if (!title)
468 title = "[unknown]";
469
470 pr_debug("Failed to generate prologue for program %s\n",
471 title);
472 return err;
473 }
474
475 memcpy(&buf[prologue_cnt], orig_insns,
476 sizeof(struct bpf_insn) * orig_insns_cnt);
477
478 res->new_insn_ptr = buf;
479 res->new_insn_cnt = prologue_cnt + orig_insns_cnt;
480 res->pfd = NULL;
481 return 0;
482
483errout:
484 pr_debug("Internal error in preproc_gen_prologue\n");
485 return -BPF_LOADER_ERRNO__PROLOGUE;
486}
487
488/*
489 * compare_tev_args is reflexive, transitive and antisymmetric.
490 * I can prove it, but this margin is too narrow to contain the proof.
491 */
492static int compare_tev_args(const void *ptev1, const void *ptev2)
493{
494 int i, ret;
495 const struct probe_trace_event *tev1 =
496 *(const struct probe_trace_event **)ptev1;
497 const struct probe_trace_event *tev2 =
498 *(const struct probe_trace_event **)ptev2;
499
500 ret = tev2->nargs - tev1->nargs;
501 if (ret)
502 return ret;
503
504 for (i = 0; i < tev1->nargs; i++) {
505 struct probe_trace_arg *arg1, *arg2;
506 struct probe_trace_arg_ref *ref1, *ref2;
507
508 arg1 = &tev1->args[i];
509 arg2 = &tev2->args[i];
510
511 ret = strcmp(arg1->value, arg2->value);
512 if (ret)
513 return ret;
514
515 ref1 = arg1->ref;
516 ref2 = arg2->ref;
517
518 while (ref1 && ref2) {
519 ret = ref2->offset - ref1->offset;
520 if (ret)
521 return ret;
522
523 ref1 = ref1->next;
524 ref2 = ref2->next;
525 }
526
527 if (ref1 || ref2)
528 return ref2 ? 1 : -1;
529 }
530
531 return 0;
532}
533
534/*
535 * Assign a type number to each tev in a pev.
536 * mapping is an array with the same number of slots as tevs in that pev.
537 * nr_types will be set to the number of types.
538 */
539static int map_prologue(struct perf_probe_event *pev, int *mapping,
540 int *nr_types)
541{
542 int i, type = 0;
543 struct probe_trace_event **ptevs;
544
545 size_t array_sz = sizeof(*ptevs) * pev->ntevs;
546
547 ptevs = malloc(array_sz);
548 if (!ptevs) {
549 pr_debug("Not enough memory: alloc ptevs failed\n");
550 return -ENOMEM;
551 }
552
553 pr_debug("In map_prologue, ntevs=%d\n", pev->ntevs);
554 for (i = 0; i < pev->ntevs; i++)
555 ptevs[i] = &pev->tevs[i];
556
557 qsort(ptevs, pev->ntevs, sizeof(*ptevs),
558 compare_tev_args);
559
560 for (i = 0; i < pev->ntevs; i++) {
561 int n;
562
563 n = ptevs[i] - pev->tevs;
564 if (i == 0) {
565 mapping[n] = type;
566 pr_debug("mapping[%d]=%d\n", n, type);
567 continue;
568 }
569
570 if (compare_tev_args(ptevs + i, ptevs + i - 1) == 0)
571 mapping[n] = type;
572 else
573 mapping[n] = ++type;
574
575 pr_debug("mapping[%d]=%d\n", n, mapping[n]);
576 }
577 free(ptevs);
578 *nr_types = type + 1;
579
580 return 0;
581}
582
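
/*
 * Decide whether this program needs a prologue (i.e. any of its tevs has
 * arguments). If so, allocate the instruction buffer and the tev->type
 * mapping, and register preproc_gen_prologue() with libbpf.
 */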
583static int hook_load_preprocessor(struct bpf_program *prog)
584{
585 struct bpf_prog_priv *priv = bpf_program__priv(prog);
586 struct perf_probe_event *pev;
587 bool need_prologue = false;
588 int err, i;
589
590 if (IS_ERR(priv) || !priv) {
591		pr_debug("Internal error when hooking preprocessor\n");
592 return -BPF_LOADER_ERRNO__INTERNAL;
593 }
594
595 if (priv->is_tp) {
596 priv->need_prologue = false;
597 return 0;
598 }
599
600 pev = &priv->pev;
601 for (i = 0; i < pev->ntevs; i++) {
602 struct probe_trace_event *tev = &pev->tevs[i];
603
604 if (tev->nargs > 0) {
605 need_prologue = true;
606 break;
607 }
608 }
609
610 /*
611	 * Since none of the tevs has an argument, we don't need to
612	 * generate a prologue.
613 */
614 if (!need_prologue) {
615 priv->need_prologue = false;
616 return 0;
617 }
618
619 priv->need_prologue = true;
620 priv->insns_buf = malloc(sizeof(struct bpf_insn) * BPF_MAXINSNS);
621 if (!priv->insns_buf) {
622 pr_debug("Not enough memory: alloc insns_buf failed\n");
623 return -ENOMEM;
624 }
625
626 priv->type_mapping = malloc(sizeof(int) * pev->ntevs);
627 if (!priv->type_mapping) {
628 pr_debug("Not enough memory: alloc type_mapping failed\n");
629 return -ENOMEM;
630 }
631 memset(priv->type_mapping, -1,
632 sizeof(int) * pev->ntevs);
633
634 err = map_prologue(pev, priv->type_mapping, &priv->nr_types);
635 if (err)
636 return err;
637
638 err = bpf_program__set_prep(prog, priv->nr_types,
639 preproc_gen_prologue);
640 return err;
641}
642
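
/*
 * Convert each program's config into kprobes/uprobes (or mark it as a
 * tracepoint), apply the probes, and hook the prologue pre-processor so
 * the programs can be loaded later by bpf__load().
 */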
643int bpf__probe(struct bpf_object *obj)
644{
645 int err = 0;
646 struct bpf_program *prog;
647 struct bpf_prog_priv *priv;
648 struct perf_probe_event *pev;
649
650 err = bpf__prepare_probe();
651 if (err) {
652 pr_debug("bpf__prepare_probe failed\n");
653 return err;
654 }
655
656 bpf_object__for_each_program(prog, obj) {
657 err = config_bpf_program(prog);
658 if (err)
659 goto out;
660
661 priv = bpf_program__priv(prog);
662 if (IS_ERR(priv) || !priv) {
663 err = PTR_ERR(priv);
664 goto out;
665 }
666
667 if (priv->is_tp) {
668 bpf_program__set_tracepoint(prog);
669 continue;
670 }
671
672 bpf_program__set_kprobe(prog);
673 pev = &priv->pev;
674
675 err = convert_perf_probe_events(pev, 1);
676 if (err < 0) {
677 pr_debug("bpf_probe: failed to convert perf probe events\n");
678 goto out;
679 }
680
681 err = apply_perf_probe_events(pev, 1);
682 if (err < 0) {
683 pr_debug("bpf_probe: failed to apply perf probe events\n");
684 goto out;
685 }
686
687 /*
688		 * After probing, let's consider the prologue, which
689		 * adds argument fetchers to BPF programs.
690		 *
691		 * hook_load_preprocessor() hooks a pre-processor
692		 * to the bpf_program, letting it generate the prologue
693		 * dynamically during loading.
694 */
695 err = hook_load_preprocessor(prog);
696 if (err)
697 goto out;
698 }
699out:
700 return err < 0 ? err : 0;
701}
702
703#define EVENTS_WRITE_BUFSIZE 4096
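/*
 * Remove every probe point created by bpf__probe() for this object by
 * deleting the matching "group:event" entries one at a time.
 */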
704int bpf__unprobe(struct bpf_object *obj)
705{
706 int err, ret = 0;
707 struct bpf_program *prog;
708
709 bpf_object__for_each_program(prog, obj) {
710 struct bpf_prog_priv *priv = bpf_program__priv(prog);
711 int i;
712
713 if (IS_ERR(priv) || !priv || priv->is_tp)
714 continue;
715
716 for (i = 0; i < priv->pev.ntevs; i++) {
717 struct probe_trace_event *tev = &priv->pev.tevs[i];
718 char name_buf[EVENTS_WRITE_BUFSIZE];
719 struct strfilter *delfilter;
720
721 snprintf(name_buf, EVENTS_WRITE_BUFSIZE,
722 "%s:%s", tev->group, tev->event);
723 name_buf[EVENTS_WRITE_BUFSIZE - 1] = '\0';
724
725 delfilter = strfilter__new(name_buf, NULL);
726 if (!delfilter) {
727 pr_debug("Failed to create filter for unprobing\n");
728 ret = -ENOMEM;
729 continue;
730 }
731
732 err = del_perf_probe_events(delfilter);
733 strfilter__delete(delfilter);
734 if (err) {
735 pr_debug("Failed to delete %s\n", name_buf);
736 ret = err;
737 continue;
738 }
739 }
740 }
741 return ret;
742}
743
744int bpf__load(struct bpf_object *obj)
745{
746 int err;
747
748 err = bpf_object__load(obj);
749 if (err) {
750 pr_debug("bpf: load objects failed\n");
751 return err;
752 }
753 return 0;
754}
755
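
/*
 * Walk every (group, event, fd) triple attached to this object and hand
 * it to 'func'. Programs with a prologue expose one fd per prologue type,
 * looked up through type_mapping.
 */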
756int bpf__foreach_event(struct bpf_object *obj,
757 bpf_prog_iter_callback_t func,
758 void *arg)
759{
760 struct bpf_program *prog;
761 int err;
762
763 bpf_object__for_each_program(prog, obj) {
764 struct bpf_prog_priv *priv = bpf_program__priv(prog);
765 struct probe_trace_event *tev;
766 struct perf_probe_event *pev;
767 int i, fd;
768
769 if (IS_ERR(priv) || !priv) {
770 pr_debug("bpf: failed to get private field\n");
771 return -BPF_LOADER_ERRNO__INTERNAL;
772 }
773
774 if (priv->is_tp) {
775 fd = bpf_program__fd(prog);
776 err = (*func)(priv->sys_name, priv->evt_name, fd, arg);
777 if (err) {
778				pr_debug("bpf: tracepoint callback failed, stop iterating\n");
779 return err;
780 }
781 continue;
782 }
783
784 pev = &priv->pev;
785 for (i = 0; i < pev->ntevs; i++) {
786 tev = &pev->tevs[i];
787
788 if (priv->need_prologue) {
789 int type = priv->type_mapping[i];
790
791 fd = bpf_program__nth_fd(prog, type);
792 } else {
793 fd = bpf_program__fd(prog);
794 }
795
796 if (fd < 0) {
797 pr_debug("bpf: failed to get file descriptor\n");
798 return fd;
799 }
800
801 err = (*func)(tev->group, tev->event, fd, arg);
802 if (err) {
803				pr_debug("bpf: callback failed, stop iterating\n");
804 return err;
805 }
806 }
807 }
808 return 0;
809}
810
811enum bpf_map_op_type {
812 BPF_MAP_OP_SET_VALUE,
813 BPF_MAP_OP_SET_EVSEL,
814};
815
816enum bpf_map_key_type {
817 BPF_MAP_KEY_ALL,
818 BPF_MAP_KEY_RANGES,
819};
820
821struct bpf_map_op {
822 struct list_head list;
823 enum bpf_map_op_type op_type;
824 enum bpf_map_key_type key_type;
825 union {
826 struct parse_events_array array;
827 } k;
828 union {
829 u64 value;
830 struct perf_evsel *evsel;
831 } v;
832};
833
834struct bpf_map_priv {
835 struct list_head ops_list;
836};
837
838static void
839bpf_map_op__delete(struct bpf_map_op *op)
840{
841 if (!list_empty(&op->list))
842 list_del(&op->list);
843 if (op->key_type == BPF_MAP_KEY_RANGES)
844 parse_events__clear_array(&op->k.array);
845 free(op);
846}
847
848static void
849bpf_map_priv__purge(struct bpf_map_priv *priv)
850{
851 struct bpf_map_op *pos, *n;
852
853 list_for_each_entry_safe(pos, n, &priv->ops_list, list) {
854 list_del_init(&pos->list);
855 bpf_map_op__delete(pos);
856 }
857}
858
859static void
860bpf_map_priv__clear(struct bpf_map *map __maybe_unused,
861 void *_priv)
862{
863 struct bpf_map_priv *priv = _priv;
864
865 bpf_map_priv__purge(priv);
866 free(priv);
867}
868
869static int
870bpf_map_op_setkey(struct bpf_map_op *op, struct parse_events_term *term)
871{
872 op->key_type = BPF_MAP_KEY_ALL;
873 if (!term)
874 return 0;
875
876 if (term->array.nr_ranges) {
877 size_t memsz = term->array.nr_ranges *
878 sizeof(op->k.array.ranges[0]);
879
880 op->k.array.ranges = memdup(term->array.ranges, memsz);
881 if (!op->k.array.ranges) {
882 pr_debug("Not enough memory to alloc indices for map\n");
883 return -ENOMEM;
884 }
885 op->key_type = BPF_MAP_KEY_RANGES;
886 op->k.array.nr_ranges = term->array.nr_ranges;
887 }
888 return 0;
889}
890
891static struct bpf_map_op *
892bpf_map_op__new(struct parse_events_term *term)
893{
894 struct bpf_map_op *op;
895 int err;
896
897 op = zalloc(sizeof(*op));
898 if (!op) {
899 pr_debug("Failed to alloc bpf_map_op\n");
900 return ERR_PTR(-ENOMEM);
901 }
902 INIT_LIST_HEAD(&op->list);
903
904 err = bpf_map_op_setkey(op, term);
905 if (err) {
906 free(op);
907 return ERR_PTR(err);
908 }
909 return op;
910}
911
912static struct bpf_map_op *
913bpf_map_op__clone(struct bpf_map_op *op)
914{
915 struct bpf_map_op *newop;
916
917 newop = memdup(op, sizeof(*op));
918 if (!newop) {
919 pr_debug("Failed to alloc bpf_map_op\n");
920 return NULL;
921 }
922
923 INIT_LIST_HEAD(&newop->list);
924 if (op->key_type == BPF_MAP_KEY_RANGES) {
925 size_t memsz = op->k.array.nr_ranges *
926 sizeof(op->k.array.ranges[0]);
927
928 newop->k.array.ranges = memdup(op->k.array.ranges, memsz);
929 if (!newop->k.array.ranges) {
930 pr_debug("Failed to alloc indices for map\n");
931 free(newop);
932 return NULL;
933 }
934 }
935
936 return newop;
937}
938
939static struct bpf_map_priv *
940bpf_map_priv__clone(struct bpf_map_priv *priv)
941{
942 struct bpf_map_priv *newpriv;
943 struct bpf_map_op *pos, *newop;
944
945 newpriv = zalloc(sizeof(*newpriv));
946 if (!newpriv) {
947 pr_debug("Not enough memory to alloc map private\n");
948 return NULL;
949 }
950 INIT_LIST_HEAD(&newpriv->ops_list);
951
952 list_for_each_entry(pos, &priv->ops_list, list) {
953 newop = bpf_map_op__clone(pos);
954 if (!newop) {
955 bpf_map_priv__purge(newpriv);
956 return NULL;
957 }
958 list_add_tail(&newop->list, &newpriv->ops_list);
959 }
960
961 return newpriv;
962}
963
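
/*
 * Append a map operation to the map's private op list, allocating the
 * private structure on first use.
 */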
964static int
965bpf_map__add_op(struct bpf_map *map, struct bpf_map_op *op)
966{
967 const char *map_name = bpf_map__name(map);
968 struct bpf_map_priv *priv = bpf_map__priv(map);
969
970 if (IS_ERR(priv)) {
971 pr_debug("Failed to get private from map %s\n", map_name);
972 return PTR_ERR(priv);
973 }
974
975 if (!priv) {
976 priv = zalloc(sizeof(*priv));
977 if (!priv) {
978 pr_debug("Not enough memory to alloc map private\n");
979 return -ENOMEM;
980 }
981 INIT_LIST_HEAD(&priv->ops_list);
982
983 if (bpf_map__set_priv(map, priv, bpf_map_priv__clear)) {
984 free(priv);
985 return -BPF_LOADER_ERRNO__INTERNAL;
986 }
987 }
988
989 list_add_tail(&op->list, &priv->ops_list);
990 return 0;
991}
992
993static struct bpf_map_op *
994bpf_map__add_newop(struct bpf_map *map, struct parse_events_term *term)
995{
996 struct bpf_map_op *op;
997 int err;
998
999 op = bpf_map_op__new(term);
1000 if (IS_ERR(op))
1001 return op;
1002
1003 err = bpf_map__add_op(map, op);
1004 if (err) {
1005 bpf_map_op__delete(op);
1006 return ERR_PTR(err);
1007 }
1008 return op;
1009}
1010
1011static int
1012__bpf_map__config_value(struct bpf_map *map,
1013 struct parse_events_term *term)
1014{
1015 struct bpf_map_op *op;
1016 const char *map_name = bpf_map__name(map);
1017 const struct bpf_map_def *def = bpf_map__def(map);
1018
1019 if (IS_ERR(def)) {
1020 pr_debug("Unable to get map definition from '%s'\n",
1021 map_name);
1022 return -BPF_LOADER_ERRNO__INTERNAL;
1023 }
1024
1025 if (def->type != BPF_MAP_TYPE_ARRAY) {
1026 pr_debug("Map %s type is not BPF_MAP_TYPE_ARRAY\n",
1027 map_name);
1028 return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
1029 }
1030 if (def->key_size < sizeof(unsigned int)) {
1031 pr_debug("Map %s has incorrect key size\n", map_name);
1032 return -BPF_LOADER_ERRNO__OBJCONF_MAP_KEYSIZE;
1033 }
1034 switch (def->value_size) {
1035 case 1:
1036 case 2:
1037 case 4:
1038 case 8:
1039 break;
1040 default:
1041 pr_debug("Map %s has incorrect value size\n", map_name);
1042 return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE;
1043 }
1044
1045 op = bpf_map__add_newop(map, term);
1046 if (IS_ERR(op))
1047 return PTR_ERR(op);
1048 op->op_type = BPF_MAP_OP_SET_VALUE;
1049 op->v.value = term->val.num;
1050 return 0;
1051}
1052
1053static int
1054bpf_map__config_value(struct bpf_map *map,
1055 struct parse_events_term *term,
1056 struct perf_evlist *evlist __maybe_unused)
1057{
1058 if (!term->err_val) {
1059 pr_debug("Config value not set\n");
1060 return -BPF_LOADER_ERRNO__OBJCONF_CONF;
1061 }
1062
1063 if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM) {
1064 pr_debug("ERROR: wrong value type for 'value'\n");
1065 return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE;
1066 }
1067
1068 return __bpf_map__config_value(map, term);
1069}
1070
1071static int
1072__bpf_map__config_event(struct bpf_map *map,
1073 struct parse_events_term *term,
1074 struct perf_evlist *evlist)
1075{
1076 struct perf_evsel *evsel;
1077 const struct bpf_map_def *def;
1078 struct bpf_map_op *op;
1079 const char *map_name = bpf_map__name(map);
1080
1081 evsel = perf_evlist__find_evsel_by_str(evlist, term->val.str);
1082 if (!evsel) {
1083 pr_debug("Event (for '%s') '%s' doesn't exist\n",
1084 map_name, term->val.str);
1085 return -BPF_LOADER_ERRNO__OBJCONF_MAP_NOEVT;
1086 }
1087
1088 def = bpf_map__def(map);
1089 if (IS_ERR(def)) {
1090 pr_debug("Unable to get map definition from '%s'\n",
1091 map_name);
1092 return PTR_ERR(def);
1093 }
1094
1095 /*
1096 * No need to check key_size and value_size:
1097	 * the kernel has already checked them.
1098 */
1099 if (def->type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
1100 pr_debug("Map %s type is not BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
1101 map_name);
1102 return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
1103 }
1104
1105 op = bpf_map__add_newop(map, term);
1106 if (IS_ERR(op))
1107 return PTR_ERR(op);
1108 op->op_type = BPF_MAP_OP_SET_EVSEL;
1109 op->v.evsel = evsel;
1110 return 0;
1111}
1112
1113static int
1114bpf_map__config_event(struct bpf_map *map,
1115 struct parse_events_term *term,
1116 struct perf_evlist *evlist)
1117{
1118 if (!term->err_val) {
1119 pr_debug("Config value not set\n");
1120 return -BPF_LOADER_ERRNO__OBJCONF_CONF;
1121 }
1122
1123 if (term->type_val != PARSE_EVENTS__TERM_TYPE_STR) {
1124 pr_debug("ERROR: wrong value type for 'event'\n");
1125 return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE;
1126 }
1127
1128 return __bpf_map__config_event(map, term, evlist);
1129}
1130
1131struct bpf_obj_config__map_func {
1132 const char *config_opt;
1133 int (*config_func)(struct bpf_map *, struct parse_events_term *,
1134 struct perf_evlist *);
1135};
1136
1137struct bpf_obj_config__map_func bpf_obj_config__map_funcs[] = {
1138 {"value", bpf_map__config_value},
1139 {"event", bpf_map__config_event},
1140};
1141
1142static int
1143config_map_indices_range_check(struct parse_events_term *term,
1144 struct bpf_map *map,
1145 const char *map_name)
1146{
1147 struct parse_events_array *array = &term->array;
1148 const struct bpf_map_def *def;
1149 unsigned int i;
1150
1151 if (!array->nr_ranges)
1152 return 0;
1153 if (!array->ranges) {
1154 pr_debug("ERROR: map %s: array->nr_ranges is %d but range array is NULL\n",
1155 map_name, (int)array->nr_ranges);
1156 return -BPF_LOADER_ERRNO__INTERNAL;
1157 }
1158
1159 def = bpf_map__def(map);
1160 if (IS_ERR(def)) {
1161 pr_debug("ERROR: Unable to get map definition from '%s'\n",
1162 map_name);
1163 return -BPF_LOADER_ERRNO__INTERNAL;
1164 }
1165
1166 for (i = 0; i < array->nr_ranges; i++) {
1167 unsigned int start = array->ranges[i].start;
1168 size_t length = array->ranges[i].length;
1169 unsigned int idx = start + length - 1;
1170
1171 if (idx >= def->max_entries) {
1172 pr_debug("ERROR: index %d too large\n", idx);
1173 return -BPF_LOADER_ERRNO__OBJCONF_MAP_IDX2BIG;
1174 }
1175 }
1176 return 0;
1177}
1178
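
/*
 * Handle a "map:<mapname>.<config opt>" term: find the map, validate any
 * index ranges and dispatch to the matching config function.
 *
 * Illustrative (hypothetical) terms: "map:flip_table.value=1",
 * "map:channel.event=cycles".
 */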
1179static int
1180bpf__obj_config_map(struct bpf_object *obj,
1181 struct parse_events_term *term,
1182 struct perf_evlist *evlist,
1183 int *key_scan_pos)
1184{
1185 /* key is "map:<mapname>.<config opt>" */
1186 char *map_name = strdup(term->config + sizeof("map:") - 1);
1187 struct bpf_map *map;
1188 int err = -BPF_LOADER_ERRNO__OBJCONF_OPT;
1189 char *map_opt;
1190 size_t i;
1191
1192 if (!map_name)
1193 return -ENOMEM;
1194
1195 map_opt = strchr(map_name, '.');
1196 if (!map_opt) {
1197 pr_debug("ERROR: Invalid map config: %s\n", map_name);
1198 goto out;
1199 }
1200
1201 *map_opt++ = '\0';
1202 if (*map_opt == '\0') {
1203 pr_debug("ERROR: Invalid map option: %s\n", term->config);
1204 goto out;
1205 }
1206
1207 map = bpf_object__find_map_by_name(obj, map_name);
1208 if (!map) {
1209 pr_debug("ERROR: Map %s doesn't exist\n", map_name);
1210 err = -BPF_LOADER_ERRNO__OBJCONF_MAP_NOTEXIST;
1211 goto out;
1212 }
1213
1214 *key_scan_pos += strlen(map_opt);
1215 err = config_map_indices_range_check(term, map, map_name);
1216 if (err)
1217 goto out;
1218 *key_scan_pos -= strlen(map_opt);
1219
1220 for (i = 0; i < ARRAY_SIZE(bpf_obj_config__map_funcs); i++) {
1221 struct bpf_obj_config__map_func *func =
1222 &bpf_obj_config__map_funcs[i];
1223
1224 if (strcmp(map_opt, func->config_opt) == 0) {
1225 err = func->config_func(map, term, evlist);
1226 goto out;
1227 }
1228 }
1229
1230 pr_debug("ERROR: Invalid map config option '%s'\n", map_opt);
1231 err = -BPF_LOADER_ERRNO__OBJCONF_MAP_OPT;
1232out:
1233	if (!err)
1234		*key_scan_pos += strlen(map_opt);
1235	free(map_name);
1236	return err;
1237}
1238
1239int bpf__config_obj(struct bpf_object *obj,
1240 struct parse_events_term *term,
1241 struct perf_evlist *evlist,
1242 int *error_pos)
1243{
1244 int key_scan_pos = 0;
1245 int err;
1246
1247 if (!obj || !term || !term->config)
1248 return -EINVAL;
1249
1250 if (strstarts(term->config, "map:")) {
1251 key_scan_pos = sizeof("map:") - 1;
1252 err = bpf__obj_config_map(obj, term, evlist, &key_scan_pos);
1253 goto out;
1254 }
1255 err = -BPF_LOADER_ERRNO__OBJCONF_OPT;
1256out:
1257 if (error_pos)
1258 *error_pos = key_scan_pos;
1259 return err;
1260
1261}
1262
1263typedef int (*map_config_func_t)(const char *name, int map_fd,
1264 const struct bpf_map_def *pdef,
1265 struct bpf_map_op *op,
1266 void *pkey, void *arg);
1267
1268static int
1269foreach_key_array_all(map_config_func_t func,
1270 void *arg, const char *name,
1271 int map_fd, const struct bpf_map_def *pdef,
1272 struct bpf_map_op *op)
1273{
1274 unsigned int i;
1275 int err;
1276
1277 for (i = 0; i < pdef->max_entries; i++) {
1278 err = func(name, map_fd, pdef, op, &i, arg);
1279 if (err) {
1280 pr_debug("ERROR: failed to insert value to %s[%u]\n",
1281 name, i);
1282 return err;
1283 }
1284 }
1285 return 0;
1286}
1287
1288static int
1289foreach_key_array_ranges(map_config_func_t func, void *arg,
1290 const char *name, int map_fd,
1291 const struct bpf_map_def *pdef,
1292 struct bpf_map_op *op)
1293{
1294 unsigned int i, j;
1295 int err;
1296
1297 for (i = 0; i < op->k.array.nr_ranges; i++) {
1298 unsigned int start = op->k.array.ranges[i].start;
1299 size_t length = op->k.array.ranges[i].length;
1300
1301 for (j = 0; j < length; j++) {
1302 unsigned int idx = start + j;
1303
1304 err = func(name, map_fd, pdef, op, &idx, arg);
1305 if (err) {
1306 pr_debug("ERROR: failed to insert value to %s[%u]\n",
1307 name, idx);
1308 return err;
1309 }
1310 }
1311 }
1312 return 0;
1313}
1314
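
/*
 * Apply every queued op of a map by calling 'func' once per affected key:
 * all keys for BPF_MAP_KEY_ALL, or only the listed ranges otherwise.
 */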
1315static int
1316bpf_map_config_foreach_key(struct bpf_map *map,
1317 map_config_func_t func,
1318 void *arg)
1319{
1320 int err, map_fd;
1321 struct bpf_map_op *op;
1322 const struct bpf_map_def *def;
1323 const char *name = bpf_map__name(map);
1324 struct bpf_map_priv *priv = bpf_map__priv(map);
1325
1326 if (IS_ERR(priv)) {
1327 pr_debug("ERROR: failed to get private from map %s\n", name);
1328 return -BPF_LOADER_ERRNO__INTERNAL;
1329 }
1330 if (!priv || list_empty(&priv->ops_list)) {
1331		pr_debug("INFO: nothing to configure for map %s\n", name);
1332 return 0;
1333 }
1334
1335 def = bpf_map__def(map);
1336 if (IS_ERR(def)) {
1337 pr_debug("ERROR: failed to get definition from map %s\n", name);
1338 return -BPF_LOADER_ERRNO__INTERNAL;
1339 }
1340 map_fd = bpf_map__fd(map);
1341 if (map_fd < 0) {
1342 pr_debug("ERROR: failed to get fd from map %s\n", name);
1343 return map_fd;
1344 }
1345
1346 list_for_each_entry(op, &priv->ops_list, list) {
1347 switch (def->type) {
1348 case BPF_MAP_TYPE_ARRAY:
1349 case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
1350 switch (op->key_type) {
1351 case BPF_MAP_KEY_ALL:
1352 err = foreach_key_array_all(func, arg, name,
1353 map_fd, def, op);
1354 break;
1355 case BPF_MAP_KEY_RANGES:
1356 err = foreach_key_array_ranges(func, arg, name,
1357 map_fd, def,
1358 op);
1359 break;
1360 default:
1361 pr_debug("ERROR: keytype for map '%s' invalid\n",
1362 name);
1363 return -BPF_LOADER_ERRNO__INTERNAL;
1364 }
1365 if (err)
1366 return err;
1367 break;
1368 default:
1369 pr_debug("ERROR: type of '%s' incorrect\n", name);
1370 return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
1371 }
1372 }
1373
1374 return 0;
1375}
1376
1377static int
1378apply_config_value_for_key(int map_fd, void *pkey,
1379 size_t val_size, u64 val)
1380{
1381 int err = 0;
1382
1383 switch (val_size) {
1384 case 1: {
1385 u8 _val = (u8)(val);
1386 err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
1387 break;
1388 }
1389 case 2: {
1390 u16 _val = (u16)(val);
1391 err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
1392 break;
1393 }
1394 case 4: {
1395 u32 _val = (u32)(val);
1396 err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
1397 break;
1398 }
1399 case 8: {
1400 err = bpf_map_update_elem(map_fd, pkey, &val, BPF_ANY);
1401 break;
1402 }
1403 default:
1404 pr_debug("ERROR: invalid value size\n");
1405 return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE;
1406 }
1407 if (err && errno)
1408 err = -errno;
1409 return err;
1410}
1411
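
/*
 * Store an event file descriptor into a BPF_MAP_TYPE_PERF_EVENT_ARRAY
 * slot, after checking the evsel's dimension, that it is not inherited,
 * and that its type is one a BPF program can write to (raw, hardware or
 * bpf-output).
 */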
1412static int
1413apply_config_evsel_for_key(const char *name, int map_fd, void *pkey,
1414 struct perf_evsel *evsel)
1415{
1416 struct xyarray *xy = evsel->fd;
1417 struct perf_event_attr *attr;
1418 unsigned int key, events;
1419 bool check_pass = false;
1420 int *evt_fd;
1421 int err;
1422
1423 if (!xy) {
1424 pr_debug("ERROR: evsel not ready for map %s\n", name);
1425 return -BPF_LOADER_ERRNO__INTERNAL;
1426 }
1427
1428 if (xy->row_size / xy->entry_size != 1) {
1429 pr_debug("ERROR: Dimension of target event is incorrect for map %s\n",
1430 name);
1431 return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTDIM;
1432 }
1433
1434 attr = &evsel->attr;
1435 if (attr->inherit) {
1436 pr_debug("ERROR: Can't put inherit event into map %s\n", name);
1437 return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTINH;
1438 }
1439
1440 if (perf_evsel__is_bpf_output(evsel))
1441 check_pass = true;
1442 if (attr->type == PERF_TYPE_RAW)
1443 check_pass = true;
1444 if (attr->type == PERF_TYPE_HARDWARE)
1445 check_pass = true;
1446 if (!check_pass) {
1447 pr_debug("ERROR: Event type is wrong for map %s\n", name);
1448 return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTTYPE;
1449 }
1450
1451 events = xy->entries / (xy->row_size / xy->entry_size);
1452 key = *((unsigned int *)pkey);
1453 if (key >= events) {
1454 pr_debug("ERROR: there is no event %d for map %s\n",
1455 key, name);
1456 return -BPF_LOADER_ERRNO__OBJCONF_MAP_MAPSIZE;
1457 }
1458 evt_fd = xyarray__entry(xy, key, 0);
1459 err = bpf_map_update_elem(map_fd, pkey, evt_fd, BPF_ANY);
1460 if (err && errno)
1461 err = -errno;
1462 return err;
1463}
1464
1465static int
1466apply_obj_config_map_for_key(const char *name, int map_fd,
1467 const struct bpf_map_def *pdef,
1468 struct bpf_map_op *op,
1469 void *pkey, void *arg __maybe_unused)
1470{
1471 int err;
1472
1473 switch (op->op_type) {
1474 case BPF_MAP_OP_SET_VALUE:
1475 err = apply_config_value_for_key(map_fd, pkey,
1476 pdef->value_size,
1477 op->v.value);
1478 break;
1479 case BPF_MAP_OP_SET_EVSEL:
1480 err = apply_config_evsel_for_key(name, map_fd, pkey,
1481 op->v.evsel);
1482 break;
1483 default:
1484 pr_debug("ERROR: unknown value type for '%s'\n", name);
1485 err = -BPF_LOADER_ERRNO__INTERNAL;
1486 }
1487 return err;
1488}
1489
1490static int
1491apply_obj_config_map(struct bpf_map *map)
1492{
1493 return bpf_map_config_foreach_key(map,
1494 apply_obj_config_map_for_key,
1495 NULL);
1496}
1497
1498static int
1499apply_obj_config_object(struct bpf_object *obj)
1500{
1501 struct bpf_map *map;
1502 int err;
1503
1504 bpf_map__for_each(map, obj) {
1505 err = apply_obj_config_map(map);
1506 if (err)
1507 return err;
1508 }
1509 return 0;
1510}
1511
1512int bpf__apply_obj_config(void)
1513{
1514 struct bpf_object *obj, *tmp;
1515 int err;
1516
1517 bpf_object__for_each_safe(obj, tmp) {
1518 err = apply_obj_config_object(obj);
1519 if (err)
1520 return err;
1521 }
1522
1523 return 0;
1524}
1525
1526#define bpf__for_each_map(pos, obj, objtmp) \
1527 bpf_object__for_each_safe(obj, objtmp) \
1528 bpf_map__for_each(pos, obj)
1529
1530#define bpf__for_each_stdout_map(pos, obj, objtmp) \
1531 bpf__for_each_map(pos, obj, objtmp) \
1532 if (bpf_map__name(pos) && \
1533 (strcmp("__bpf_stdout__", \
1534 bpf_map__name(pos)) == 0))
1535
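
/*
 * Make sure every "__bpf_stdout__" map has an output target: reuse an
 * already-configured map's priv as a template, or create a new bpf-output
 * event and point the unconfigured maps at it.
 */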
1536int bpf__setup_stdout(struct perf_evlist *evlist)
1537{
1538 struct bpf_map_priv *tmpl_priv = NULL;
1539 struct bpf_object *obj, *tmp;
1540 struct perf_evsel *evsel = NULL;
1541 struct bpf_map *map;
1542 int err;
1543 bool need_init = false;
1544
1545 bpf__for_each_stdout_map(map, obj, tmp) {
1546 struct bpf_map_priv *priv = bpf_map__priv(map);
1547
1548 if (IS_ERR(priv))
1549 return -BPF_LOADER_ERRNO__INTERNAL;
1550
1551 /*
1552 * No need to check map type: type should have been
1553		 * verified by the kernel.
1554 */
1555 if (!need_init && !priv)
1556 need_init = !priv;
1557 if (!tmpl_priv && priv)
1558 tmpl_priv = priv;
1559 }
1560
1561 if (!need_init)
1562 return 0;
1563
1564 if (!tmpl_priv) {
1565 err = parse_events(evlist, "bpf-output/no-inherit=1,name=__bpf_stdout__/",
1566 NULL);
1567 if (err) {
1568 pr_debug("ERROR: failed to create bpf-output event\n");
1569 return -err;
1570 }
1571
1572 evsel = perf_evlist__last(evlist);
1573 }
1574
1575 bpf__for_each_stdout_map(map, obj, tmp) {
1576 struct bpf_map_priv *priv = bpf_map__priv(map);
1577
1578 if (IS_ERR(priv))
1579 return -BPF_LOADER_ERRNO__INTERNAL;
1580 if (priv)
1581 continue;
1582
1583 if (tmpl_priv) {
1584 priv = bpf_map_priv__clone(tmpl_priv);
1585 if (!priv)
1586 return -ENOMEM;
1587
1588 err = bpf_map__set_priv(map, priv, bpf_map_priv__clear);
1589 if (err) {
1590 bpf_map_priv__clear(map, priv);
1591 return err;
1592 }
1593 } else if (evsel) {
1594 struct bpf_map_op *op;
1595
1596 op = bpf_map__add_newop(map, NULL);
1597 if (IS_ERR(op))
1598 return PTR_ERR(op);
1599 op->op_type = BPF_MAP_OP_SET_EVSEL;
1600 op->v.evsel = evsel;
1601 }
1602 }
1603
1604 return 0;
1605}
1606
1607#define ERRNO_OFFSET(e) ((e) - __BPF_LOADER_ERRNO__START)
1608#define ERRCODE_OFFSET(c) ERRNO_OFFSET(BPF_LOADER_ERRNO__##c)
1609#define NR_ERRNO (__BPF_LOADER_ERRNO__END - __BPF_LOADER_ERRNO__START)
1610
1611static const char *bpf_loader_strerror_table[NR_ERRNO] = {
1612 [ERRCODE_OFFSET(CONFIG)] = "Invalid config string",
1613 [ERRCODE_OFFSET(GROUP)] = "Invalid group name",
1614 [ERRCODE_OFFSET(EVENTNAME)] = "No event name found in config string",
1615 [ERRCODE_OFFSET(INTERNAL)] = "BPF loader internal error",
1616 [ERRCODE_OFFSET(COMPILE)] = "Error when compiling BPF scriptlet",
1617 [ERRCODE_OFFSET(PROGCONF_TERM)] = "Invalid program config term in config string",
1618 [ERRCODE_OFFSET(PROLOGUE)] = "Failed to generate prologue",
1619 [ERRCODE_OFFSET(PROLOGUE2BIG)] = "Prologue too big for program",
1620 [ERRCODE_OFFSET(PROLOGUEOOB)] = "Offset out of bound for prologue",
1621 [ERRCODE_OFFSET(OBJCONF_OPT)] = "Invalid object config option",
1622 [ERRCODE_OFFSET(OBJCONF_CONF)] = "Config value not set (missing '=')",
1623 [ERRCODE_OFFSET(OBJCONF_MAP_OPT)] = "Invalid object map config option",
1624 [ERRCODE_OFFSET(OBJCONF_MAP_NOTEXIST)] = "Target map doesn't exist",
1625 [ERRCODE_OFFSET(OBJCONF_MAP_VALUE)] = "Incorrect value type for map",
1626 [ERRCODE_OFFSET(OBJCONF_MAP_TYPE)] = "Incorrect map type",
1627 [ERRCODE_OFFSET(OBJCONF_MAP_KEYSIZE)] = "Incorrect map key size",
1628 [ERRCODE_OFFSET(OBJCONF_MAP_VALUESIZE)] = "Incorrect map value size",
1629 [ERRCODE_OFFSET(OBJCONF_MAP_NOEVT)] = "Event not found for map setting",
1630 [ERRCODE_OFFSET(OBJCONF_MAP_MAPSIZE)] = "Invalid map size for event setting",
1631 [ERRCODE_OFFSET(OBJCONF_MAP_EVTDIM)] = "Event dimension too large",
1632 [ERRCODE_OFFSET(OBJCONF_MAP_EVTINH)] = "Doesn't support inherit event",
1633 [ERRCODE_OFFSET(OBJCONF_MAP_EVTTYPE)] = "Wrong event type for map",
1634 [ERRCODE_OFFSET(OBJCONF_MAP_IDX2BIG)] = "Index too large",
1635};
1636
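
/*
 * Translate an error code into a message: libbpf errors go to
 * libbpf_strerror(), BPF loader errors use the table above, and anything
 * else falls back to the ordinary errno string.
 */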
1637static int
1638bpf_loader_strerror(int err, char *buf, size_t size)
1639{
1640 char sbuf[STRERR_BUFSIZE];
1641 const char *msg;
1642
1643 if (!buf || !size)
1644 return -1;
1645
1646 err = err > 0 ? err : -err;
1647
1648 if (err >= __LIBBPF_ERRNO__START)
1649 return libbpf_strerror(err, buf, size);
1650
1651 if (err >= __BPF_LOADER_ERRNO__START && err < __BPF_LOADER_ERRNO__END) {
1652 msg = bpf_loader_strerror_table[ERRNO_OFFSET(err)];
1653 snprintf(buf, size, "%s", msg);
1654 buf[size - 1] = '\0';
1655 return 0;
1656 }
1657
1658 if (err >= __BPF_LOADER_ERRNO__END)
1659 snprintf(buf, size, "Unknown bpf loader error %d", err);
1660 else
1661 snprintf(buf, size, "%s",
1662 str_error_r(err, sbuf, sizeof(sbuf)));
1663
1664 buf[size - 1] = '\0';
1665 return -1;
1666}
1667
1668#define bpf__strerror_head(err, buf, size) \
1669 char sbuf[STRERR_BUFSIZE], *emsg;\
1670 if (!size)\
1671 return 0;\
1672 if (err < 0)\
1673 err = -err;\
1674 bpf_loader_strerror(err, sbuf, sizeof(sbuf));\
1675 emsg = sbuf;\
1676 switch (err) {\
1677 default:\
1678 scnprintf(buf, size, "%s", emsg);\
1679 break;
1680
1681#define bpf__strerror_entry(val, fmt...)\
1682 case val: {\
1683 scnprintf(buf, size, fmt);\
1684 break;\
1685 }
1686
1687#define bpf__strerror_end(buf, size)\
1688 }\
1689 buf[size - 1] = '\0';
1690
1691int bpf__strerror_prepare_load(const char *filename, bool source,
1692 int err, char *buf, size_t size)
1693{
1694 size_t n;
1695 int ret;
1696
1697 n = snprintf(buf, size, "Failed to load %s%s: ",
1698 filename, source ? " from source" : "");
1699 if (n >= size) {
1700 buf[size - 1] = '\0';
1701 return 0;
1702 }
1703 buf += n;
1704 size -= n;
1705
1706 ret = bpf_loader_strerror(err, buf, size);
1707 buf[size - 1] = '\0';
1708 return ret;
1709}
1710
1711int bpf__strerror_probe(struct bpf_object *obj __maybe_unused,
1712 int err, char *buf, size_t size)
1713{
1714 bpf__strerror_head(err, buf, size);
1715 case BPF_LOADER_ERRNO__PROGCONF_TERM: {
1716 scnprintf(buf, size, "%s (add -v to see detail)", emsg);
1717 break;
1718 }
1719	bpf__strerror_entry(EEXIST, "Probe point exists. Try 'perf probe -d \"*\"' and set 'force=yes'");
1720 bpf__strerror_entry(EACCES, "You need to be root");
1721 bpf__strerror_entry(EPERM, "You need to be root, and /proc/sys/kernel/kptr_restrict should be 0");
1722 bpf__strerror_entry(ENOENT, "You need to check probing points in BPF file");
1723 bpf__strerror_end(buf, size);
1724 return 0;
1725}
1726
1727int bpf__strerror_load(struct bpf_object *obj,
1728 int err, char *buf, size_t size)
1729{
1730 bpf__strerror_head(err, buf, size);
1731 case LIBBPF_ERRNO__KVER: {
1732 unsigned int obj_kver = bpf_object__kversion(obj);
1733 unsigned int real_kver;
1734
1735 if (fetch_kernel_version(&real_kver, NULL, 0)) {
1736 scnprintf(buf, size, "Unable to fetch kernel version");
1737 break;
1738 }
1739
1740 if (obj_kver != real_kver) {
1741 scnprintf(buf, size,
1742 "'version' ("KVER_FMT") doesn't match running kernel ("KVER_FMT")",
1743 KVER_PARAM(obj_kver),
1744 KVER_PARAM(real_kver));
1745 break;
1746 }
1747
1748 scnprintf(buf, size, "Failed to load program for unknown reason");
1749 break;
1750 }
1751 bpf__strerror_end(buf, size);
1752 return 0;
1753}
1754
1755int bpf__strerror_config_obj(struct bpf_object *obj __maybe_unused,
1756 struct parse_events_term *term __maybe_unused,
1757 struct perf_evlist *evlist __maybe_unused,
1758 int *error_pos __maybe_unused, int err,
1759 char *buf, size_t size)
1760{
1761 bpf__strerror_head(err, buf, size);
1762 bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE,
1763 "Can't use this config term with this map type");
1764 bpf__strerror_end(buf, size);
1765 return 0;
1766}
1767
1768int bpf__strerror_apply_obj_config(int err, char *buf, size_t size)
1769{
1770 bpf__strerror_head(err, buf, size);
1771 bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTDIM,
1772 "Cannot set event to BPF map in multi-thread tracing");
1773 bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTINH,
1774 "%s (Hint: use -i to turn off inherit)", emsg);
1775 bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTTYPE,
1776 "Can only put raw, hardware and BPF output event into a BPF map");
1777 bpf__strerror_end(buf, size);
1778 return 0;
1779}
1780
1781int bpf__strerror_setup_stdout(struct perf_evlist *evlist __maybe_unused,
1782 int err, char *buf, size_t size)
1783{
1784 bpf__strerror_head(err, buf, size);
1785 bpf__strerror_end(buf, size);
1786 return 0;
1787}
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * bpf-loader.c
4 *
5 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
6 * Copyright (C) 2015 Huawei Inc.
7 */
8
9#include <linux/bpf.h>
10#include <bpf/libbpf.h>
11#include <bpf/bpf.h>
12#include <linux/filter.h>
13#include <linux/err.h>
14#include <linux/kernel.h>
15#include <linux/string.h>
16#include <linux/zalloc.h>
17#include <errno.h>
18#include <stdlib.h>
19#include "debug.h"
20#include "evlist.h"
21#include "bpf-loader.h"
22#include "bpf-prologue.h"
23#include "probe-event.h"
24#include "probe-finder.h" // for MAX_PROBES
25#include "parse-events.h"
26#include "strfilter.h"
27#include "util.h"
28#include "llvm-utils.h"
29#include "c++/clang-c.h"
30#include "util/hashmap.h"
31#include "asm/bug.h"
32
33#include <internal/xyarray.h>
34
35#ifndef HAVE_LIBBPF_BPF_PROGRAM__SET_INSNS
36int bpf_program__set_insns(struct bpf_program *prog __maybe_unused,
37 struct bpf_insn *new_insns __maybe_unused, size_t new_insn_cnt __maybe_unused)
38{
39	pr_err("%s: not supported, update libbpf\n", __func__);
40 return -ENOTSUP;
41}
42
43int libbpf_register_prog_handler(const char *sec __maybe_unused,
44 enum bpf_prog_type prog_type __maybe_unused,
45 enum bpf_attach_type exp_attach_type __maybe_unused,
46 const struct libbpf_prog_handler_opts *opts __maybe_unused)
47{
48	pr_err("%s: not supported, update libbpf\n", __func__);
49 return -ENOTSUP;
50}
51#endif
52
53/* temporarily disable libbpf deprecation warnings */
54#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
55
56static int libbpf_perf_print(enum libbpf_print_level level __attribute__((unused)),
57 const char *fmt, va_list args)
58{
59 return veprintf(1, verbose, pr_fmt(fmt), args);
60}
61
62struct bpf_prog_priv {
63 bool is_tp;
64 char *sys_name;
65 char *evt_name;
66 struct perf_probe_event pev;
67 bool need_prologue;
68 struct bpf_insn *insns_buf;
69 int nr_types;
70 int *type_mapping;
71 int *prologue_fds;
72};
73
74struct bpf_perf_object {
75 struct list_head list;
76 struct bpf_object *obj;
77};
78
79struct bpf_preproc_result {
80 struct bpf_insn *new_insn_ptr;
81 int new_insn_cnt;
82};
83
84static LIST_HEAD(bpf_objects_list);
85static struct hashmap *bpf_program_hash;
86static struct hashmap *bpf_map_hash;
87
88static struct bpf_perf_object *
89bpf_perf_object__next(struct bpf_perf_object *prev)
90{
91 if (!prev) {
92 if (list_empty(&bpf_objects_list))
93 return NULL;
94
95 return list_first_entry(&bpf_objects_list, struct bpf_perf_object, list);
96 }
97 if (list_is_last(&prev->list, &bpf_objects_list))
98 return NULL;
99
100 return list_next_entry(prev, list);
101}
102
103#define bpf_perf_object__for_each(perf_obj, tmp) \
104 for ((perf_obj) = bpf_perf_object__next(NULL), \
105 (tmp) = bpf_perf_object__next(perf_obj); \
106 (perf_obj) != NULL; \
107 (perf_obj) = (tmp), (tmp) = bpf_perf_object__next(tmp))
108
109static bool libbpf_initialized;
110static int libbpf_sec_handler;
111
112static int bpf_perf_object__add(struct bpf_object *obj)
113{
114 struct bpf_perf_object *perf_obj = zalloc(sizeof(*perf_obj));
115
116 if (perf_obj) {
117 INIT_LIST_HEAD(&perf_obj->list);
118 perf_obj->obj = obj;
119 list_add_tail(&perf_obj->list, &bpf_objects_list);
120 }
121 return perf_obj ? 0 : -ENOMEM;
122}
123
124static void *program_priv(const struct bpf_program *prog)
125{
126 void *priv;
127
128 if (IS_ERR_OR_NULL(bpf_program_hash))
129 return NULL;
130 if (!hashmap__find(bpf_program_hash, prog, &priv))
131 return NULL;
132 return priv;
133}
134
135static struct bpf_insn prologue_init_insn[] = {
136 BPF_MOV64_IMM(BPF_REG_2, 0),
137 BPF_MOV64_IMM(BPF_REG_3, 0),
138 BPF_MOV64_IMM(BPF_REG_4, 0),
139 BPF_MOV64_IMM(BPF_REG_5, 0),
140};
141
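
/*
 * libbpf section-handler callback: when a program needs a prologue,
 * prepend prologue_init_insn (which zeroes the argument registers r2-r5)
 * to its instructions before loading.
 */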
142static int libbpf_prog_prepare_load_fn(struct bpf_program *prog,
143 struct bpf_prog_load_opts *opts __maybe_unused,
144 long cookie __maybe_unused)
145{
146 size_t init_size_cnt = ARRAY_SIZE(prologue_init_insn);
147 size_t orig_insn_cnt, insn_cnt, init_size, orig_size;
148 struct bpf_prog_priv *priv = program_priv(prog);
149 const struct bpf_insn *orig_insn;
150 struct bpf_insn *insn;
151
152 if (IS_ERR_OR_NULL(priv)) {
153 pr_debug("bpf: failed to get private field\n");
154 return -BPF_LOADER_ERRNO__INTERNAL;
155 }
156
157 if (!priv->need_prologue)
158 return 0;
159
160 /* prepend initialization code to program instructions */
161 orig_insn = bpf_program__insns(prog);
162 orig_insn_cnt = bpf_program__insn_cnt(prog);
163 init_size = init_size_cnt * sizeof(*insn);
164 orig_size = orig_insn_cnt * sizeof(*insn);
165
166 insn_cnt = orig_insn_cnt + init_size_cnt;
167 insn = malloc(insn_cnt * sizeof(*insn));
168 if (!insn)
169 return -ENOMEM;
170
171 memcpy(insn, prologue_init_insn, init_size);
172 memcpy((char *) insn + init_size, orig_insn, orig_size);
173 bpf_program__set_insns(prog, insn, insn_cnt);
174 return 0;
175}
176
177static int libbpf_init(void)
178{
179 LIBBPF_OPTS(libbpf_prog_handler_opts, handler_opts,
180 .prog_prepare_load_fn = libbpf_prog_prepare_load_fn,
181 );
182
183 if (libbpf_initialized)
184 return 0;
185
186 libbpf_set_print(libbpf_perf_print);
187 libbpf_sec_handler = libbpf_register_prog_handler(NULL, BPF_PROG_TYPE_KPROBE,
188 0, &handler_opts);
189 if (libbpf_sec_handler < 0) {
190 pr_debug("bpf: failed to register libbpf section handler: %d\n",
191 libbpf_sec_handler);
192 return -BPF_LOADER_ERRNO__INTERNAL;
193 }
194 libbpf_initialized = true;
195 return 0;
196}
197
198struct bpf_object *
199bpf__prepare_load_buffer(void *obj_buf, size_t obj_buf_sz, const char *name)
200{
201 LIBBPF_OPTS(bpf_object_open_opts, opts, .object_name = name);
202 struct bpf_object *obj;
203 int err;
204
205 err = libbpf_init();
206 if (err)
207 return ERR_PTR(err);
208
209 obj = bpf_object__open_mem(obj_buf, obj_buf_sz, &opts);
210 if (IS_ERR_OR_NULL(obj)) {
211 pr_debug("bpf: failed to load buffer\n");
212 return ERR_PTR(-EINVAL);
213 }
214
215 if (bpf_perf_object__add(obj)) {
216 bpf_object__close(obj);
217 return ERR_PTR(-ENOMEM);
218 }
219
220 return obj;
221}
222
223static void bpf_perf_object__close(struct bpf_perf_object *perf_obj)
224{
225 list_del(&perf_obj->list);
226 bpf_object__close(perf_obj->obj);
227 free(perf_obj);
228}
229
230struct bpf_object *bpf__prepare_load(const char *filename, bool source)
231{
232 LIBBPF_OPTS(bpf_object_open_opts, opts, .object_name = filename);
233 struct bpf_object *obj;
234 int err;
235
236 err = libbpf_init();
237 if (err)
238 return ERR_PTR(err);
239
240 if (source) {
241 void *obj_buf;
242 size_t obj_buf_sz;
243
244 perf_clang__init();
245 err = perf_clang__compile_bpf(filename, &obj_buf, &obj_buf_sz);
246 perf_clang__cleanup();
247 if (err) {
248 pr_debug("bpf: builtin compilation failed: %d, try external compiler\n", err);
249 err = llvm__compile_bpf(filename, &obj_buf, &obj_buf_sz);
250 if (err)
251 return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE);
252 } else
253 pr_debug("bpf: successful builtin compilation\n");
254 obj = bpf_object__open_mem(obj_buf, obj_buf_sz, &opts);
255
256 if (!IS_ERR_OR_NULL(obj) && llvm_param.dump_obj)
257 llvm__dump_obj(filename, obj_buf, obj_buf_sz);
258
259 free(obj_buf);
260 } else {
261 obj = bpf_object__open(filename);
262 }
263
264 if (IS_ERR_OR_NULL(obj)) {
265 pr_debug("bpf: failed to load %s\n", filename);
266 return obj;
267 }
268
269 if (bpf_perf_object__add(obj)) {
270 bpf_object__close(obj);
271 return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE);
272 }
273
274 return obj;
275}
276
277static void close_prologue_programs(struct bpf_prog_priv *priv)
278{
279 struct perf_probe_event *pev;
280 int i, fd;
281
282 if (!priv->need_prologue)
283 return;
284 pev = &priv->pev;
285 for (i = 0; i < pev->ntevs; i++) {
286 fd = priv->prologue_fds[i];
287 if (fd != -1)
288 close(fd);
289 }
290}
291
292static void
293clear_prog_priv(const struct bpf_program *prog __maybe_unused,
294 void *_priv)
295{
296 struct bpf_prog_priv *priv = _priv;
297
298 close_prologue_programs(priv);
299 cleanup_perf_probe_events(&priv->pev, 1);
300 zfree(&priv->insns_buf);
301 zfree(&priv->prologue_fds);
302 zfree(&priv->type_mapping);
303 zfree(&priv->sys_name);
304 zfree(&priv->evt_name);
305 free(priv);
306}
307
308static void bpf_program_hash_free(void)
309{
310 struct hashmap_entry *cur;
311 size_t bkt;
312
313 if (IS_ERR_OR_NULL(bpf_program_hash))
314 return;
315
316 hashmap__for_each_entry(bpf_program_hash, cur, bkt)
317 clear_prog_priv(cur->pkey, cur->pvalue);
318
319 hashmap__free(bpf_program_hash);
320 bpf_program_hash = NULL;
321}
322
323static void bpf_map_hash_free(void);
324
325void bpf__clear(void)
326{
327 struct bpf_perf_object *perf_obj, *tmp;
328
329 bpf_perf_object__for_each(perf_obj, tmp) {
330 bpf__unprobe(perf_obj->obj);
331 bpf_perf_object__close(perf_obj);
332 }
333
334 bpf_program_hash_free();
335 bpf_map_hash_free();
336}
337
338static size_t ptr_hash(const long __key, void *ctx __maybe_unused)
339{
340 return __key;
341}
342
343static bool ptr_equal(long key1, long key2, void *ctx __maybe_unused)
344{
345 return key1 == key2;
346}
347
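
/*
 * Attach private data to a program through a global hashmap keyed by the
 * program pointer, replacing the per-program priv support that older
 * libbpf versions provided.
 */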
348static int program_set_priv(struct bpf_program *prog, void *priv)
349{
350 void *old_priv;
351
352 /*
353 * Should not happen, we warn about it in the
354 * caller function - config_bpf_program
355 */
356 if (IS_ERR(bpf_program_hash))
357 return PTR_ERR(bpf_program_hash);
358
359 if (!bpf_program_hash) {
360 bpf_program_hash = hashmap__new(ptr_hash, ptr_equal, NULL);
361 if (IS_ERR(bpf_program_hash))
362 return PTR_ERR(bpf_program_hash);
363 }
364
365 old_priv = program_priv(prog);
366 if (old_priv) {
367 clear_prog_priv(prog, old_priv);
368 return hashmap__set(bpf_program_hash, prog, priv, NULL, NULL);
369 }
370 return hashmap__add(bpf_program_hash, prog, priv);
371}
372
373static int
374prog_config__exec(const char *value, struct perf_probe_event *pev)
375{
376 pev->uprobes = true;
377 pev->target = strdup(value);
378 if (!pev->target)
379 return -ENOMEM;
380 return 0;
381}
382
383static int
384prog_config__module(const char *value, struct perf_probe_event *pev)
385{
386 pev->uprobes = false;
387 pev->target = strdup(value);
388 if (!pev->target)
389 return -ENOMEM;
390 return 0;
391}
392
393static int
394prog_config__bool(const char *value, bool *pbool, bool invert)
395{
396 int err;
397 bool bool_value;
398
399 if (!pbool)
400 return -EINVAL;
401
402 err = strtobool(value, &bool_value);
403 if (err)
404 return err;
405
406 *pbool = invert ? !bool_value : bool_value;
407 return 0;
408}
409
410static int
411prog_config__inlines(const char *value,
412 struct perf_probe_event *pev __maybe_unused)
413{
414 return prog_config__bool(value, &probe_conf.no_inlines, true);
415}
416
417static int
418prog_config__force(const char *value,
419 struct perf_probe_event *pev __maybe_unused)
420{
421 return prog_config__bool(value, &probe_conf.force_add, false);
422}
423
424static struct {
425 const char *key;
426 const char *usage;
427 const char *desc;
428 int (*func)(const char *, struct perf_probe_event *);
429} bpf_prog_config_terms[] = {
430 {
431 .key = "exec",
432 .usage = "exec=<full path of file>",
433 .desc = "Set uprobe target",
434 .func = prog_config__exec,
435 },
436 {
437 .key = "module",
438 .usage = "module=<module name> ",
439 .desc = "Set kprobe module",
440 .func = prog_config__module,
441 },
442 {
443 .key = "inlines",
444 .usage = "inlines=[yes|no] ",
445 .desc = "Probe at inline symbol",
446 .func = prog_config__inlines,
447 },
448 {
449 .key = "force",
450 .usage = "force=[yes|no] ",
451 .desc = "Forcibly add events with existing name",
452 .func = prog_config__force,
453 },
454};
455
456static int
457do_prog_config(const char *key, const char *value,
458 struct perf_probe_event *pev)
459{
460 unsigned int i;
461
462 pr_debug("config bpf program: %s=%s\n", key, value);
463 for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++)
464 if (strcmp(key, bpf_prog_config_terms[i].key) == 0)
465 return bpf_prog_config_terms[i].func(value, pev);
466
467 pr_debug("BPF: ERROR: invalid program config option: %s=%s\n",
468 key, value);
469
470 pr_debug("\nHint: Valid options are:\n");
471 for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++)
472 pr_debug("\t%s:\t%s\n", bpf_prog_config_terms[i].usage,
473 bpf_prog_config_terms[i].desc);
474 pr_debug("\n");
475
476 return -BPF_LOADER_ERRNO__PROGCONF_TERM;
477}
478
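/*
 * Consume leading "key=value;" pairs from @config_str, applying each via
 * do_prog_config(), and return a pointer into @config_str just past the
 * last consumed pair (the "main" probe definition).  For example
 * (illustrative), "module=kvm;probe=foo" applies the module term and
 * returns a pointer to "probe=foo".  Returns an ERR_PTR() on failure.
 */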
479static const char *
480parse_prog_config_kvpair(const char *config_str, struct perf_probe_event *pev)
481{
482 char *text = strdup(config_str);
483 char *sep, *line;
484 const char *main_str = NULL;
485 int err = 0;
486
487 if (!text) {
488 pr_debug("Not enough memory: dup config_str failed\n");
489 return ERR_PTR(-ENOMEM);
490 }
491
492 line = text;
493 while ((sep = strchr(line, ';'))) {
494 char *equ;
495
496 *sep = '\0';
497 equ = strchr(line, '=');
498 if (!equ) {
499 pr_warning("WARNING: invalid config in BPF object: %s\n",
500 line);
501 pr_warning("\tShould be 'key=value'.\n");
502 goto nextline;
503 }
504 *equ = '\0';
505
506 err = do_prog_config(line, equ + 1, pev);
507 if (err)
508 break;
509nextline:
510 line = sep + 1;
511 }
512
513 if (!err)
514 main_str = config_str + (line - text);
515 free(text);
516
517 return err ? ERR_PTR(err) : main_str;
518}
519
520static int
521parse_prog_config(const char *config_str, const char **p_main_str,
522 bool *is_tp, struct perf_probe_event *pev)
523{
524 int err;
525 const char *main_str = parse_prog_config_kvpair(config_str, pev);
526
527 if (IS_ERR(main_str))
528 return PTR_ERR(main_str);
529
530 *p_main_str = main_str;
531 if (!strchr(main_str, '=')) {
532 /* Is a tracepoint event? */
533 const char *s = strchr(main_str, ':');
534
535 if (!s) {
536 pr_debug("bpf: '%s' is not a valid tracepoint\n",
537 config_str);
538 return -BPF_LOADER_ERRNO__CONFIG;
539 }
540
541 *is_tp = true;
542 return 0;
543 }
544
545 *is_tp = false;
546 err = parse_perf_probe_command(main_str, pev);
547 if (err < 0) {
548 pr_debug("bpf: '%s' is not a valid config string\n",
549 config_str);
550		/* parse failed, no need to clear pev. */
551 return -BPF_LOADER_ERRNO__CONFIG;
552 }
553 return 0;
554}
555
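/*
 * Derive probing information from the program's ELF section name.
 * Two forms are handled (examples are illustrative only):
 *
 *   "sched:sched_switch"         - tracepoint: no '=' present, "<sys>:<event>"
 *   "func=generic_perform_write" - probe spec handed to
 *                                  parse_perf_probe_command(), optionally
 *                                  preceded by "key=value;" config terms
 */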
556static int
557config_bpf_program(struct bpf_program *prog)
558{
559 struct perf_probe_event *pev = NULL;
560 struct bpf_prog_priv *priv = NULL;
561 const char *config_str, *main_str;
562 bool is_tp = false;
563 int err;
564
565 /* Initialize per-program probing setting */
566 probe_conf.no_inlines = false;
567 probe_conf.force_add = false;
568
569	priv = calloc(1, sizeof(*priv));
570 if (!priv) {
571 pr_debug("bpf: failed to alloc priv\n");
572 return -ENOMEM;
573 }
574 pev = &priv->pev;
575
576 config_str = bpf_program__section_name(prog);
577 pr_debug("bpf: config program '%s'\n", config_str);
578 err = parse_prog_config(config_str, &main_str, &is_tp, pev);
579 if (err)
580 goto errout;
581
582 if (is_tp) {
583 char *s = strchr(main_str, ':');
584
585 priv->is_tp = true;
586 priv->sys_name = strndup(main_str, s - main_str);
587 priv->evt_name = strdup(s + 1);
588 goto set_priv;
589 }
590
591 if (pev->group && strcmp(pev->group, PERF_BPF_PROBE_GROUP)) {
592 pr_debug("bpf: '%s': group for event is set and not '%s'.\n",
593 config_str, PERF_BPF_PROBE_GROUP);
594 err = -BPF_LOADER_ERRNO__GROUP;
595 goto errout;
596 } else if (!pev->group)
597 pev->group = strdup(PERF_BPF_PROBE_GROUP);
598
599 if (!pev->group) {
600 pr_debug("bpf: strdup failed\n");
601 err = -ENOMEM;
602 goto errout;
603 }
604
605 if (!pev->event) {
606 pr_debug("bpf: '%s': event name is missing. Section name should be 'key=value'\n",
607 config_str);
608 err = -BPF_LOADER_ERRNO__EVENTNAME;
609 goto errout;
610 }
611 pr_debug("bpf: config '%s' is ok\n", config_str);
612
613set_priv:
614 err = program_set_priv(prog, priv);
615 if (err) {
616 pr_debug("Failed to set priv for program '%s'\n", config_str);
617 goto errout;
618 }
619
620 return 0;
621
622errout:
623 if (pev)
624 clear_perf_probe_event(pev);
625 free(priv);
626 return err;
627}
628
629static int bpf__prepare_probe(void)
630{
631 static int err = 0;
632 static bool initialized = false;
633
634 /*
635	 * Make err static, so if init fails the first time, bpf__prepare_probe()
636 * fails each time without calling init_probe_symbol_maps multiple
637 * times.
638 */
639 if (initialized)
640 return err;
641
642 initialized = true;
643 err = init_probe_symbol_maps(false);
644 if (err < 0)
645 pr_debug("Failed to init_probe_symbol_maps\n");
646 probe_conf.max_probes = MAX_PROBES;
647 return err;
648}
649
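/*
 * Preprocessor callback: generate the prologue for prologue type @n
 * (fetching the arguments of a tev mapped to that type) into
 * priv->insns_buf, append @orig_insns after it, and report the
 * combined instructions through @res.
 */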
650static int
651preproc_gen_prologue(struct bpf_program *prog, int n,
652 const struct bpf_insn *orig_insns, int orig_insns_cnt,
653 struct bpf_preproc_result *res)
654{
655 struct bpf_prog_priv *priv = program_priv(prog);
656 struct probe_trace_event *tev;
657 struct perf_probe_event *pev;
658 struct bpf_insn *buf;
659 size_t prologue_cnt = 0;
660 int i, err;
661
662 if (IS_ERR_OR_NULL(priv) || priv->is_tp)
663 goto errout;
664
665 pev = &priv->pev;
666
667 if (n < 0 || n >= priv->nr_types)
668 goto errout;
669
670	/* Find a tev that belongs to that type */
671 for (i = 0; i < pev->ntevs; i++) {
672 if (priv->type_mapping[i] == n)
673 break;
674 }
675
676 if (i >= pev->ntevs) {
677 pr_debug("Internal error: prologue type %d not found\n", n);
678 return -BPF_LOADER_ERRNO__PROLOGUE;
679 }
680
681 tev = &pev->tevs[i];
682
683 buf = priv->insns_buf;
684 err = bpf__gen_prologue(tev->args, tev->nargs,
685 buf, &prologue_cnt,
686 BPF_MAXINSNS - orig_insns_cnt);
687 if (err) {
688 const char *title;
689
690 title = bpf_program__section_name(prog);
691 pr_debug("Failed to generate prologue for program %s\n",
692 title);
693 return err;
694 }
695
696 memcpy(&buf[prologue_cnt], orig_insns,
697 sizeof(struct bpf_insn) * orig_insns_cnt);
698
699 res->new_insn_ptr = buf;
700 res->new_insn_cnt = prologue_cnt + orig_insns_cnt;
701 return 0;
702
703errout:
704 pr_debug("Internal error in preproc_gen_prologue\n");
705 return -BPF_LOADER_ERRNO__PROLOGUE;
706}
707
708/*
709 * compare_tev_args is reflexive, transitive and antisymmetric.
710 * I can prove it, but this margin is too narrow to contain the proof.
711 */
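/*
 * The order implemented below: more arguments sort first; ties are
 * broken by the argument value strings, then by dereference offsets.
 * Two tevs that compare equal can share a single prologue.
 */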
712static int compare_tev_args(const void *ptev1, const void *ptev2)
713{
714 int i, ret;
715 const struct probe_trace_event *tev1 =
716 *(const struct probe_trace_event **)ptev1;
717 const struct probe_trace_event *tev2 =
718 *(const struct probe_trace_event **)ptev2;
719
720 ret = tev2->nargs - tev1->nargs;
721 if (ret)
722 return ret;
723
724 for (i = 0; i < tev1->nargs; i++) {
725 struct probe_trace_arg *arg1, *arg2;
726 struct probe_trace_arg_ref *ref1, *ref2;
727
728 arg1 = &tev1->args[i];
729 arg2 = &tev2->args[i];
730
731 ret = strcmp(arg1->value, arg2->value);
732 if (ret)
733 return ret;
734
735 ref1 = arg1->ref;
736 ref2 = arg2->ref;
737
738 while (ref1 && ref2) {
739 ret = ref2->offset - ref1->offset;
740 if (ret)
741 return ret;
742
743 ref1 = ref1->next;
744 ref2 = ref2->next;
745 }
746
747 if (ref1 || ref2)
748 return ref2 ? 1 : -1;
749 }
750
751 return 0;
752}
753
754/*
755 * Assign a type number to each tev in a pev.
756 * mapping is an array with the same number of slots as there are
757 * tevs in that pev.  nr_types will be set to the number of types.
758 */
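/*
 * For example (illustrative): if tevs 0 and 2 fetch identical argument
 * lists and tev 1 differs, the two identical tevs get the same type
 * number, e.g. mapping = {0, 1, 0} and *nr_types = 2 (the exact numbers
 * depend on the sort order), so only two prologues need generating.
 */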
759static int map_prologue(struct perf_probe_event *pev, int *mapping,
760 int *nr_types)
761{
762 int i, type = 0;
763 struct probe_trace_event **ptevs;
764
765 size_t array_sz = sizeof(*ptevs) * pev->ntevs;
766
767 ptevs = malloc(array_sz);
768 if (!ptevs) {
769 pr_debug("Not enough memory: alloc ptevs failed\n");
770 return -ENOMEM;
771 }
772
773 pr_debug("In map_prologue, ntevs=%d\n", pev->ntevs);
774 for (i = 0; i < pev->ntevs; i++)
775 ptevs[i] = &pev->tevs[i];
776
777 qsort(ptevs, pev->ntevs, sizeof(*ptevs),
778 compare_tev_args);
779
780 for (i = 0; i < pev->ntevs; i++) {
781 int n;
782
783 n = ptevs[i] - pev->tevs;
784 if (i == 0) {
785 mapping[n] = type;
786 pr_debug("mapping[%d]=%d\n", n, type);
787 continue;
788 }
789
790 if (compare_tev_args(ptevs + i, ptevs + i - 1) == 0)
791 mapping[n] = type;
792 else
793 mapping[n] = ++type;
794
795 pr_debug("mapping[%d]=%d\n", n, mapping[n]);
796 }
797 free(ptevs);
798 *nr_types = type + 1;
799
800 return 0;
801}
802
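/*
 * Decide whether @prog needs a prologue (i.e. at least one of its tevs
 * has arguments to fetch).  If so, allocate insns_buf, prologue_fds and
 * type_mapping in its priv and compute the tev -> prologue-type map.
 */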
803static int hook_load_preprocessor(struct bpf_program *prog)
804{
805 struct bpf_prog_priv *priv = program_priv(prog);
806 struct perf_probe_event *pev;
807 bool need_prologue = false;
808 int i;
809
810 if (IS_ERR_OR_NULL(priv)) {
811		pr_debug("Internal error when hooking preprocessor\n");
812 return -BPF_LOADER_ERRNO__INTERNAL;
813 }
814
815 if (priv->is_tp) {
816 priv->need_prologue = false;
817 return 0;
818 }
819
820 pev = &priv->pev;
821 for (i = 0; i < pev->ntevs; i++) {
822 struct probe_trace_event *tev = &pev->tevs[i];
823
824 if (tev->nargs > 0) {
825 need_prologue = true;
826 break;
827 }
828 }
829
830 /*
831	 * Since none of the tevs has an argument, we don't need to
832	 * generate a prologue.
833 */
834 if (!need_prologue) {
835 priv->need_prologue = false;
836 return 0;
837 }
838
839 priv->need_prologue = true;
840 priv->insns_buf = malloc(sizeof(struct bpf_insn) * BPF_MAXINSNS);
841 if (!priv->insns_buf) {
842 pr_debug("Not enough memory: alloc insns_buf failed\n");
843 return -ENOMEM;
844 }
845
846 priv->prologue_fds = malloc(sizeof(int) * pev->ntevs);
847 if (!priv->prologue_fds) {
848 pr_debug("Not enough memory: alloc prologue fds failed\n");
849 return -ENOMEM;
850 }
851 memset(priv->prologue_fds, -1, sizeof(int) * pev->ntevs);
852
853 priv->type_mapping = malloc(sizeof(int) * pev->ntevs);
854 if (!priv->type_mapping) {
855 pr_debug("Not enough memory: alloc type_mapping failed\n");
856 return -ENOMEM;
857 }
858 memset(priv->type_mapping, -1,
859 sizeof(int) * pev->ntevs);
860
861 return map_prologue(pev, priv->type_mapping, &priv->nr_types);
862}
863
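/*
 * For every program in @obj: parse its section name, create the
 * corresponding kprobes/uprobes (tracepoint programs are only
 * type-tagged), and hook the prologue preprocessor so argument
 * fetchers can be generated at load time.
 */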
864int bpf__probe(struct bpf_object *obj)
865{
866 int err = 0;
867 struct bpf_program *prog;
868 struct bpf_prog_priv *priv;
869 struct perf_probe_event *pev;
870
871 err = bpf__prepare_probe();
872 if (err) {
873 pr_debug("bpf__prepare_probe failed\n");
874 return err;
875 }
876
877 bpf_object__for_each_program(prog, obj) {
878 err = config_bpf_program(prog);
879 if (err)
880 goto out;
881
882 priv = program_priv(prog);
883 if (IS_ERR_OR_NULL(priv)) {
884 if (!priv)
885 err = -BPF_LOADER_ERRNO__INTERNAL;
886 else
887 err = PTR_ERR(priv);
888 goto out;
889 }
890
891 if (priv->is_tp) {
892 bpf_program__set_type(prog, BPF_PROG_TYPE_TRACEPOINT);
893 continue;
894 }
895
896 bpf_program__set_type(prog, BPF_PROG_TYPE_KPROBE);
897 pev = &priv->pev;
898
899 err = convert_perf_probe_events(pev, 1);
900 if (err < 0) {
901 pr_debug("bpf_probe: failed to convert perf probe events\n");
902 goto out;
903 }
904
905 err = apply_perf_probe_events(pev, 1);
906 if (err < 0) {
907 pr_debug("bpf_probe: failed to apply perf probe events\n");
908 goto out;
909 }
910
911 /*
912		 * After probing, consider the prologue, which
913		 * adds argument fetchers to BPF programs.
914		 *
915		 * hook_load_preprocessor() hooks a pre-processor
916		 * to the bpf_program and lets it generate the
917		 * prologue dynamically during loading.
918 */
919 err = hook_load_preprocessor(prog);
920 if (err)
921 goto out;
922 }
923out:
924 return err < 0 ? err : 0;
925}
926
927#define EVENTS_WRITE_BUFSIZE 4096
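/*
 * Remove the kprobes/uprobes created by bpf__probe() by deleting each
 * "group:event" through del_perf_probe_events().  Tracepoint programs
 * have nothing to undo and are skipped.
 */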
928int bpf__unprobe(struct bpf_object *obj)
929{
930 int err, ret = 0;
931 struct bpf_program *prog;
932
933 bpf_object__for_each_program(prog, obj) {
934 struct bpf_prog_priv *priv = program_priv(prog);
935 int i;
936
937 if (IS_ERR_OR_NULL(priv) || priv->is_tp)
938 continue;
939
940 for (i = 0; i < priv->pev.ntevs; i++) {
941 struct probe_trace_event *tev = &priv->pev.tevs[i];
942 char name_buf[EVENTS_WRITE_BUFSIZE];
943 struct strfilter *delfilter;
944
945 snprintf(name_buf, EVENTS_WRITE_BUFSIZE,
946 "%s:%s", tev->group, tev->event);
947 name_buf[EVENTS_WRITE_BUFSIZE - 1] = '\0';
948
949 delfilter = strfilter__new(name_buf, NULL);
950 if (!delfilter) {
951 pr_debug("Failed to create filter for unprobing\n");
952 ret = -ENOMEM;
953 continue;
954 }
955
956 err = del_perf_probe_events(delfilter);
957 strfilter__delete(delfilter);
958 if (err) {
959 pr_debug("Failed to delete %s\n", name_buf);
960 ret = err;
961 continue;
962 }
963 }
964 }
965 return ret;
966}
967
968static int bpf_object__load_prologue(struct bpf_object *obj)
969{
970 int init_cnt = ARRAY_SIZE(prologue_init_insn);
971 const struct bpf_insn *orig_insns;
972 struct bpf_preproc_result res;
973 struct perf_probe_event *pev;
974 struct bpf_program *prog;
975 int orig_insns_cnt;
976
977 bpf_object__for_each_program(prog, obj) {
978 struct bpf_prog_priv *priv = program_priv(prog);
979 int err, i, fd;
980
981 if (IS_ERR_OR_NULL(priv)) {
982 pr_debug("bpf: failed to get private field\n");
983 return -BPF_LOADER_ERRNO__INTERNAL;
984 }
985
986 if (!priv->need_prologue)
987 continue;
988
989 /*
990		 * For each program that needs a prologue we do the following:
991		 *
992		 * - take its current instructions and use them
993		 *   to generate the new code with the prologue
994		 * - load the new instructions with bpf_prog_load
995		 *   and keep the fd in prologue_fds
996		 * - the new fd will be used in bpf__foreach_event
997		 *   to connect this program with a perf evsel
998 */
999 orig_insns = bpf_program__insns(prog);
1000 orig_insns_cnt = bpf_program__insn_cnt(prog);
1001
1002 pev = &priv->pev;
1003 for (i = 0; i < pev->ntevs; i++) {
1004 /*
1005			 * Skip the artificial prologue_init_insn instructions
1006			 * (init_cnt), so the prologue can be generated in
1007			 * their place.
1008 */
1009 err = preproc_gen_prologue(prog, i,
1010 orig_insns + init_cnt,
1011 orig_insns_cnt - init_cnt,
1012 &res);
1013 if (err)
1014 return err;
1015
1016 fd = bpf_prog_load(bpf_program__get_type(prog),
1017 bpf_program__name(prog), "GPL",
1018 res.new_insn_ptr,
1019 res.new_insn_cnt, NULL);
1020 if (fd < 0) {
1021 char bf[128];
1022
1023 libbpf_strerror(-errno, bf, sizeof(bf));
1024 pr_debug("bpf: load objects with prologue failed: err=%d: (%s)\n",
1025 -errno, bf);
1026 return -errno;
1027 }
1028 priv->prologue_fds[i] = fd;
1029 }
1030 /*
1031 * We no longer need the original program,
1032		 * so we can unload it.
1033 */
1034 bpf_program__unload(prog);
1035 }
1036 return 0;
1037}
1038
1039int bpf__load(struct bpf_object *obj)
1040{
1041 int err;
1042
1043 err = bpf_object__load(obj);
1044 if (err) {
1045 char bf[128];
1046 libbpf_strerror(err, bf, sizeof(bf));
1047 pr_debug("bpf: load objects failed: err=%d: (%s)\n", err, bf);
1048 return err;
1049 }
1050 return bpf_object__load_prologue(obj);
1051}
1052
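/*
 * Invoke @func once per attachable entity in @obj: once per tracepoint
 * program (with its sys/event names) and once per tev of a probe
 * program, passing the prologue fd when one was generated, otherwise
 * the program's own fd.
 */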
1053int bpf__foreach_event(struct bpf_object *obj,
1054 bpf_prog_iter_callback_t func,
1055 void *arg)
1056{
1057 struct bpf_program *prog;
1058 int err;
1059
1060 bpf_object__for_each_program(prog, obj) {
1061 struct bpf_prog_priv *priv = program_priv(prog);
1062 struct probe_trace_event *tev;
1063 struct perf_probe_event *pev;
1064 int i, fd;
1065
1066 if (IS_ERR_OR_NULL(priv)) {
1067 pr_debug("bpf: failed to get private field\n");
1068 return -BPF_LOADER_ERRNO__INTERNAL;
1069 }
1070
1071 if (priv->is_tp) {
1072 fd = bpf_program__fd(prog);
1073 err = (*func)(priv->sys_name, priv->evt_name, fd, obj, arg);
1074 if (err) {
1075				pr_debug("bpf: tracepoint callback failed, stop iterating\n");
1076 return err;
1077 }
1078 continue;
1079 }
1080
1081 pev = &priv->pev;
1082 for (i = 0; i < pev->ntevs; i++) {
1083 tev = &pev->tevs[i];
1084
1085 if (priv->need_prologue)
1086 fd = priv->prologue_fds[i];
1087 else
1088 fd = bpf_program__fd(prog);
1089
1090 if (fd < 0) {
1091 pr_debug("bpf: failed to get file descriptor\n");
1092 return fd;
1093 }
1094
1095 err = (*func)(tev->group, tev->event, fd, obj, arg);
1096 if (err) {
1097				pr_debug("bpf: callback failed, stop iterating\n");
1098 return err;
1099 }
1100 }
1101 }
1102 return 0;
1103}
1104
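/*
 * Map configuration model: each "map:<name>.<opt>=..." term becomes a
 * struct bpf_map_op (set a value or wire up an evsel, for all keys or
 * for explicit index ranges).  Ops are queued on the map's private
 * ops_list and only written to the kernel map by
 * bpf__apply_obj_config(), after the object has been loaded.
 */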
1105enum bpf_map_op_type {
1106 BPF_MAP_OP_SET_VALUE,
1107 BPF_MAP_OP_SET_EVSEL,
1108};
1109
1110enum bpf_map_key_type {
1111 BPF_MAP_KEY_ALL,
1112 BPF_MAP_KEY_RANGES,
1113};
1114
1115struct bpf_map_op {
1116 struct list_head list;
1117 enum bpf_map_op_type op_type;
1118 enum bpf_map_key_type key_type;
1119 union {
1120 struct parse_events_array array;
1121 } k;
1122 union {
1123 u64 value;
1124 struct evsel *evsel;
1125 } v;
1126};
1127
1128struct bpf_map_priv {
1129 struct list_head ops_list;
1130};
1131
1132static void
1133bpf_map_op__delete(struct bpf_map_op *op)
1134{
1135 if (!list_empty(&op->list))
1136 list_del_init(&op->list);
1137 if (op->key_type == BPF_MAP_KEY_RANGES)
1138 parse_events__clear_array(&op->k.array);
1139 free(op);
1140}
1141
1142static void
1143bpf_map_priv__purge(struct bpf_map_priv *priv)
1144{
1145 struct bpf_map_op *pos, *n;
1146
1147 list_for_each_entry_safe(pos, n, &priv->ops_list, list) {
1148 list_del_init(&pos->list);
1149 bpf_map_op__delete(pos);
1150 }
1151}
1152
1153static void
1154bpf_map_priv__clear(const struct bpf_map *map __maybe_unused,
1155 void *_priv)
1156{
1157 struct bpf_map_priv *priv = _priv;
1158
1159 bpf_map_priv__purge(priv);
1160 free(priv);
1161}
1162
1163static void *map_priv(const struct bpf_map *map)
1164{
1165 void *priv;
1166
1167 if (IS_ERR_OR_NULL(bpf_map_hash))
1168 return NULL;
1169 if (!hashmap__find(bpf_map_hash, map, &priv))
1170 return NULL;
1171 return priv;
1172}
1173
1174static void bpf_map_hash_free(void)
1175{
1176 struct hashmap_entry *cur;
1177 size_t bkt;
1178
1179 if (IS_ERR_OR_NULL(bpf_map_hash))
1180 return;
1181
1182 hashmap__for_each_entry(bpf_map_hash, cur, bkt)
1183 bpf_map_priv__clear(cur->pkey, cur->pvalue);
1184
1185 hashmap__free(bpf_map_hash);
1186 bpf_map_hash = NULL;
1187}
1188
1189static int map_set_priv(struct bpf_map *map, void *priv)
1190{
1191 void *old_priv;
1192
1193 if (WARN_ON_ONCE(IS_ERR(bpf_map_hash)))
1194		return PTR_ERR(bpf_map_hash);
1195
1196 if (!bpf_map_hash) {
1197 bpf_map_hash = hashmap__new(ptr_hash, ptr_equal, NULL);
1198 if (IS_ERR(bpf_map_hash))
1199 return PTR_ERR(bpf_map_hash);
1200 }
1201
1202 old_priv = map_priv(map);
1203 if (old_priv) {
1204 bpf_map_priv__clear(map, old_priv);
1205 return hashmap__set(bpf_map_hash, map, priv, NULL, NULL);
1206 }
1207 return hashmap__add(bpf_map_hash, map, priv);
1208}
1209
1210static int
1211bpf_map_op_setkey(struct bpf_map_op *op, struct parse_events_term *term)
1212{
1213 op->key_type = BPF_MAP_KEY_ALL;
1214 if (!term)
1215 return 0;
1216
1217 if (term->array.nr_ranges) {
1218 size_t memsz = term->array.nr_ranges *
1219 sizeof(op->k.array.ranges[0]);
1220
1221 op->k.array.ranges = memdup(term->array.ranges, memsz);
1222 if (!op->k.array.ranges) {
1223 pr_debug("Not enough memory to alloc indices for map\n");
1224 return -ENOMEM;
1225 }
1226 op->key_type = BPF_MAP_KEY_RANGES;
1227 op->k.array.nr_ranges = term->array.nr_ranges;
1228 }
1229 return 0;
1230}
1231
1232static struct bpf_map_op *
1233bpf_map_op__new(struct parse_events_term *term)
1234{
1235 struct bpf_map_op *op;
1236 int err;
1237
1238 op = zalloc(sizeof(*op));
1239 if (!op) {
1240 pr_debug("Failed to alloc bpf_map_op\n");
1241 return ERR_PTR(-ENOMEM);
1242 }
1243 INIT_LIST_HEAD(&op->list);
1244
1245 err = bpf_map_op_setkey(op, term);
1246 if (err) {
1247 free(op);
1248 return ERR_PTR(err);
1249 }
1250 return op;
1251}
1252
1253static struct bpf_map_op *
1254bpf_map_op__clone(struct bpf_map_op *op)
1255{
1256 struct bpf_map_op *newop;
1257
1258 newop = memdup(op, sizeof(*op));
1259 if (!newop) {
1260 pr_debug("Failed to alloc bpf_map_op\n");
1261 return NULL;
1262 }
1263
1264 INIT_LIST_HEAD(&newop->list);
1265 if (op->key_type == BPF_MAP_KEY_RANGES) {
1266 size_t memsz = op->k.array.nr_ranges *
1267 sizeof(op->k.array.ranges[0]);
1268
1269 newop->k.array.ranges = memdup(op->k.array.ranges, memsz);
1270 if (!newop->k.array.ranges) {
1271 pr_debug("Failed to alloc indices for map\n");
1272 free(newop);
1273 return NULL;
1274 }
1275 }
1276
1277 return newop;
1278}
1279
1280static struct bpf_map_priv *
1281bpf_map_priv__clone(struct bpf_map_priv *priv)
1282{
1283 struct bpf_map_priv *newpriv;
1284 struct bpf_map_op *pos, *newop;
1285
1286 newpriv = zalloc(sizeof(*newpriv));
1287 if (!newpriv) {
1288 pr_debug("Not enough memory to alloc map private\n");
1289 return NULL;
1290 }
1291 INIT_LIST_HEAD(&newpriv->ops_list);
1292
1293 list_for_each_entry(pos, &priv->ops_list, list) {
1294 newop = bpf_map_op__clone(pos);
1295 if (!newop) {
1296 bpf_map_priv__purge(newpriv);
1297 return NULL;
1298 }
1299 list_add_tail(&newop->list, &newpriv->ops_list);
1300 }
1301
1302 return newpriv;
1303}
1304
1305static int
1306bpf_map__add_op(struct bpf_map *map, struct bpf_map_op *op)
1307{
1308 const char *map_name = bpf_map__name(map);
1309 struct bpf_map_priv *priv = map_priv(map);
1310
1311 if (IS_ERR(priv)) {
1312 pr_debug("Failed to get private from map %s\n", map_name);
1313 return PTR_ERR(priv);
1314 }
1315
1316 if (!priv) {
1317 priv = zalloc(sizeof(*priv));
1318 if (!priv) {
1319 pr_debug("Not enough memory to alloc map private\n");
1320 return -ENOMEM;
1321 }
1322 INIT_LIST_HEAD(&priv->ops_list);
1323
1324 if (map_set_priv(map, priv)) {
1325 free(priv);
1326 return -BPF_LOADER_ERRNO__INTERNAL;
1327 }
1328 }
1329
1330 list_add_tail(&op->list, &priv->ops_list);
1331 return 0;
1332}
1333
1334static struct bpf_map_op *
1335bpf_map__add_newop(struct bpf_map *map, struct parse_events_term *term)
1336{
1337 struct bpf_map_op *op;
1338 int err;
1339
1340 op = bpf_map_op__new(term);
1341 if (IS_ERR(op))
1342 return op;
1343
1344 err = bpf_map__add_op(map, op);
1345 if (err) {
1346 bpf_map_op__delete(op);
1347 return ERR_PTR(err);
1348 }
1349 return op;
1350}
1351
1352static int
1353__bpf_map__config_value(struct bpf_map *map,
1354 struct parse_events_term *term)
1355{
1356 struct bpf_map_op *op;
1357 const char *map_name = bpf_map__name(map);
1358
1359 if (!map) {
1360 pr_debug("Map '%s' is invalid\n", map_name);
1361 return -BPF_LOADER_ERRNO__INTERNAL;
1362 }
1363
1364 if (bpf_map__type(map) != BPF_MAP_TYPE_ARRAY) {
1365 pr_debug("Map %s type is not BPF_MAP_TYPE_ARRAY\n",
1366 map_name);
1367 return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
1368 }
1369 if (bpf_map__key_size(map) < sizeof(unsigned int)) {
1370 pr_debug("Map %s has incorrect key size\n", map_name);
1371 return -BPF_LOADER_ERRNO__OBJCONF_MAP_KEYSIZE;
1372 }
1373 switch (bpf_map__value_size(map)) {
1374 case 1:
1375 case 2:
1376 case 4:
1377 case 8:
1378 break;
1379 default:
1380 pr_debug("Map %s has incorrect value size\n", map_name);
1381 return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE;
1382 }
1383
1384 op = bpf_map__add_newop(map, term);
1385 if (IS_ERR(op))
1386 return PTR_ERR(op);
1387 op->op_type = BPF_MAP_OP_SET_VALUE;
1388 op->v.value = term->val.num;
1389 return 0;
1390}
1391
1392static int
1393bpf_map__config_value(struct bpf_map *map,
1394 struct parse_events_term *term,
1395 struct evlist *evlist __maybe_unused)
1396{
1397 if (!term->err_val) {
1398 pr_debug("Config value not set\n");
1399 return -BPF_LOADER_ERRNO__OBJCONF_CONF;
1400 }
1401
1402 if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM) {
1403 pr_debug("ERROR: wrong value type for 'value'\n");
1404 return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE;
1405 }
1406
1407 return __bpf_map__config_value(map, term);
1408}
1409
1410static int
1411__bpf_map__config_event(struct bpf_map *map,
1412 struct parse_events_term *term,
1413 struct evlist *evlist)
1414{
1415 struct bpf_map_op *op;
1416 const char *map_name = bpf_map__name(map);
1417 struct evsel *evsel = evlist__find_evsel_by_str(evlist, term->val.str);
1418
1419 if (!evsel) {
1420 pr_debug("Event (for '%s') '%s' doesn't exist\n",
1421 map_name, term->val.str);
1422 return -BPF_LOADER_ERRNO__OBJCONF_MAP_NOEVT;
1423 }
1424
1425 if (!map) {
1426 pr_debug("Map '%s' is invalid\n", map_name);
1427		return -BPF_LOADER_ERRNO__INTERNAL;
1428 }
1429
1430 /*
1431 * No need to check key_size and value_size:
1432 * kernel has already checked them.
1433 */
1434 if (bpf_map__type(map) != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
1435 pr_debug("Map %s type is not BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
1436 map_name);
1437 return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
1438 }
1439
1440 op = bpf_map__add_newop(map, term);
1441 if (IS_ERR(op))
1442 return PTR_ERR(op);
1443 op->op_type = BPF_MAP_OP_SET_EVSEL;
1444 op->v.evsel = evsel;
1445 return 0;
1446}
1447
1448static int
1449bpf_map__config_event(struct bpf_map *map,
1450 struct parse_events_term *term,
1451 struct evlist *evlist)
1452{
1453 if (!term->err_val) {
1454 pr_debug("Config value not set\n");
1455 return -BPF_LOADER_ERRNO__OBJCONF_CONF;
1456 }
1457
1458 if (term->type_val != PARSE_EVENTS__TERM_TYPE_STR) {
1459 pr_debug("ERROR: wrong value type for 'event'\n");
1460 return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE;
1461 }
1462
1463 return __bpf_map__config_event(map, term, evlist);
1464}
1465
1466struct bpf_obj_config__map_func {
1467 const char *config_opt;
1468 int (*config_func)(struct bpf_map *, struct parse_events_term *,
1469 struct evlist *);
1470};
1471
1472struct bpf_obj_config__map_func bpf_obj_config__map_funcs[] = {
1473 {"value", bpf_map__config_value},
1474 {"event", bpf_map__config_event},
1475};
1476
1477static int
1478config_map_indices_range_check(struct parse_events_term *term,
1479 struct bpf_map *map,
1480 const char *map_name)
1481{
1482 struct parse_events_array *array = &term->array;
1483 unsigned int i;
1484
1485 if (!array->nr_ranges)
1486 return 0;
1487 if (!array->ranges) {
1488 pr_debug("ERROR: map %s: array->nr_ranges is %d but range array is NULL\n",
1489 map_name, (int)array->nr_ranges);
1490 return -BPF_LOADER_ERRNO__INTERNAL;
1491 }
1492
1493 if (!map) {
1494 pr_debug("Map '%s' is invalid\n", map_name);
1495 return -BPF_LOADER_ERRNO__INTERNAL;
1496 }
1497
1498 for (i = 0; i < array->nr_ranges; i++) {
1499 unsigned int start = array->ranges[i].start;
1500 size_t length = array->ranges[i].length;
1501 unsigned int idx = start + length - 1;
1502
1503 if (idx >= bpf_map__max_entries(map)) {
1504 pr_debug("ERROR: index %d too large\n", idx);
1505 return -BPF_LOADER_ERRNO__OBJCONF_MAP_IDX2BIG;
1506 }
1507 }
1508 return 0;
1509}
1510
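/*
 * Illustrative examples of accepted terms (the map names are
 * hypothetical):
 *
 *   "map:counters.value=42"    - write 42 to the selected (or all) slots
 *   "map:channel.event=cycles" - wire the evsel named "cycles" into the map
 */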
1511static int
1512bpf__obj_config_map(struct bpf_object *obj,
1513 struct parse_events_term *term,
1514 struct evlist *evlist,
1515 int *key_scan_pos)
1516{
1517 /* key is "map:<mapname>.<config opt>" */
1518 char *map_name = strdup(term->config + sizeof("map:") - 1);
1519 struct bpf_map *map;
1520 int err = -BPF_LOADER_ERRNO__OBJCONF_OPT;
1521 char *map_opt;
1522 size_t i;
1523
1524 if (!map_name)
1525 return -ENOMEM;
1526
1527 map_opt = strchr(map_name, '.');
1528 if (!map_opt) {
1529 pr_debug("ERROR: Invalid map config: %s\n", map_name);
1530 goto out;
1531 }
1532
1533 *map_opt++ = '\0';
1534 if (*map_opt == '\0') {
1535 pr_debug("ERROR: Invalid map option: %s\n", term->config);
1536 goto out;
1537 }
1538
1539 map = bpf_object__find_map_by_name(obj, map_name);
1540 if (!map) {
1541 pr_debug("ERROR: Map %s doesn't exist\n", map_name);
1542 err = -BPF_LOADER_ERRNO__OBJCONF_MAP_NOTEXIST;
1543 goto out;
1544 }
1545
1546 *key_scan_pos += strlen(map_opt);
1547 err = config_map_indices_range_check(term, map, map_name);
1548 if (err)
1549 goto out;
1550 *key_scan_pos -= strlen(map_opt);
1551
1552 for (i = 0; i < ARRAY_SIZE(bpf_obj_config__map_funcs); i++) {
1553 struct bpf_obj_config__map_func *func =
1554 &bpf_obj_config__map_funcs[i];
1555
1556 if (strcmp(map_opt, func->config_opt) == 0) {
1557 err = func->config_func(map, term, evlist);
1558 goto out;
1559 }
1560 }
1561
1562 pr_debug("ERROR: Invalid map config option '%s'\n", map_opt);
1563 err = -BPF_LOADER_ERRNO__OBJCONF_MAP_OPT;
1564out:
1565 if (!err)
1566 *key_scan_pos += strlen(map_opt);
1567
1568 free(map_name);
1569 return err;
1570}
1571
1572int bpf__config_obj(struct bpf_object *obj,
1573 struct parse_events_term *term,
1574 struct evlist *evlist,
1575 int *error_pos)
1576{
1577 int key_scan_pos = 0;
1578 int err;
1579
1580 if (!obj || !term || !term->config)
1581 return -EINVAL;
1582
1583 if (strstarts(term->config, "map:")) {
1584 key_scan_pos = sizeof("map:") - 1;
1585 err = bpf__obj_config_map(obj, term, evlist, &key_scan_pos);
1586 goto out;
1587 }
1588 err = -BPF_LOADER_ERRNO__OBJCONF_OPT;
1589out:
1590 if (error_pos)
1591 *error_pos = key_scan_pos;
1592 return err;
1593
1594}
1595
1596typedef int (*map_config_func_t)(const char *name, int map_fd,
1597 const struct bpf_map *map,
1598 struct bpf_map_op *op,
1599 void *pkey, void *arg);
1600
1601static int
1602foreach_key_array_all(map_config_func_t func,
1603 void *arg, const char *name,
1604 int map_fd, const struct bpf_map *map,
1605 struct bpf_map_op *op)
1606{
1607 unsigned int i;
1608 int err;
1609
1610 for (i = 0; i < bpf_map__max_entries(map); i++) {
1611 err = func(name, map_fd, map, op, &i, arg);
1612 if (err) {
1613 pr_debug("ERROR: failed to insert value to %s[%u]\n",
1614 name, i);
1615 return err;
1616 }
1617 }
1618 return 0;
1619}
1620
1621static int
1622foreach_key_array_ranges(map_config_func_t func, void *arg,
1623 const char *name, int map_fd,
1624 const struct bpf_map *map,
1625 struct bpf_map_op *op)
1626{
1627 unsigned int i, j;
1628 int err;
1629
1630 for (i = 0; i < op->k.array.nr_ranges; i++) {
1631 unsigned int start = op->k.array.ranges[i].start;
1632 size_t length = op->k.array.ranges[i].length;
1633
1634 for (j = 0; j < length; j++) {
1635 unsigned int idx = start + j;
1636
1637 err = func(name, map_fd, map, op, &idx, arg);
1638 if (err) {
1639 pr_debug("ERROR: failed to insert value to %s[%u]\n",
1640 name, idx);
1641 return err;
1642 }
1643 }
1644 }
1645 return 0;
1646}
1647
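/*
 * Walk every queued op of @map and invoke @func for each affected key:
 * all array indices for BPF_MAP_KEY_ALL, or only the configured ranges
 * for BPF_MAP_KEY_RANGES.  Only array-like map types are supported.
 */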
1648static int
1649bpf_map_config_foreach_key(struct bpf_map *map,
1650 map_config_func_t func,
1651 void *arg)
1652{
1653 int err, map_fd, type;
1654 struct bpf_map_op *op;
1655 const char *name = bpf_map__name(map);
1656 struct bpf_map_priv *priv = map_priv(map);
1657
1658 if (IS_ERR(priv)) {
1659 pr_debug("ERROR: failed to get private from map %s\n", name);
1660 return -BPF_LOADER_ERRNO__INTERNAL;
1661 }
1662 if (!priv || list_empty(&priv->ops_list)) {
1663 pr_debug("INFO: nothing to config for map %s\n", name);
1664 return 0;
1665 }
1666
1667 if (!map) {
1668 pr_debug("Map '%s' is invalid\n", name);
1669 return -BPF_LOADER_ERRNO__INTERNAL;
1670 }
1671 map_fd = bpf_map__fd(map);
1672 if (map_fd < 0) {
1673 pr_debug("ERROR: failed to get fd from map %s\n", name);
1674 return map_fd;
1675 }
1676
1677 type = bpf_map__type(map);
1678 list_for_each_entry(op, &priv->ops_list, list) {
1679 switch (type) {
1680 case BPF_MAP_TYPE_ARRAY:
1681 case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
1682 switch (op->key_type) {
1683 case BPF_MAP_KEY_ALL:
1684 err = foreach_key_array_all(func, arg, name,
1685 map_fd, map, op);
1686 break;
1687 case BPF_MAP_KEY_RANGES:
1688 err = foreach_key_array_ranges(func, arg, name,
1689 map_fd, map, op);
1690 break;
1691 default:
1692 pr_debug("ERROR: keytype for map '%s' invalid\n",
1693 name);
1694 return -BPF_LOADER_ERRNO__INTERNAL;
1695 }
1696 if (err)
1697 return err;
1698 break;
1699 default:
1700 pr_debug("ERROR: type of '%s' incorrect\n", name);
1701 return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
1702 }
1703 }
1704
1705 return 0;
1706}
1707
1708static int
1709apply_config_value_for_key(int map_fd, void *pkey,
1710 size_t val_size, u64 val)
1711{
1712 int err = 0;
1713
1714 switch (val_size) {
1715 case 1: {
1716 u8 _val = (u8)(val);
1717 err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
1718 break;
1719 }
1720 case 2: {
1721 u16 _val = (u16)(val);
1722 err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
1723 break;
1724 }
1725 case 4: {
1726 u32 _val = (u32)(val);
1727 err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
1728 break;
1729 }
1730 case 8: {
1731 err = bpf_map_update_elem(map_fd, pkey, &val, BPF_ANY);
1732 break;
1733 }
1734 default:
1735 pr_debug("ERROR: invalid value size\n");
1736 return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE;
1737 }
1738 if (err && errno)
1739 err = -errno;
1740 return err;
1741}
1742
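/*
 * Write @evsel's perf event fd for cpu index *pkey into map slot *pkey.
 * The evsel must have a single fd per cpu, must not use inherit, and
 * must be a bpf-output, raw or hardware event.
 */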
1743static int
1744apply_config_evsel_for_key(const char *name, int map_fd, void *pkey,
1745 struct evsel *evsel)
1746{
1747 struct xyarray *xy = evsel->core.fd;
1748 struct perf_event_attr *attr;
1749 unsigned int key, events;
1750 bool check_pass = false;
1751 int *evt_fd;
1752 int err;
1753
1754 if (!xy) {
1755 pr_debug("ERROR: evsel not ready for map %s\n", name);
1756 return -BPF_LOADER_ERRNO__INTERNAL;
1757 }
1758
1759 if (xy->row_size / xy->entry_size != 1) {
1760 pr_debug("ERROR: Dimension of target event is incorrect for map %s\n",
1761 name);
1762 return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTDIM;
1763 }
1764
1765 attr = &evsel->core.attr;
1766 if (attr->inherit) {
1767 pr_debug("ERROR: Can't put inherit event into map %s\n", name);
1768 return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTINH;
1769 }
1770
1771 if (evsel__is_bpf_output(evsel))
1772 check_pass = true;
1773 if (attr->type == PERF_TYPE_RAW)
1774 check_pass = true;
1775 if (attr->type == PERF_TYPE_HARDWARE)
1776 check_pass = true;
1777 if (!check_pass) {
1778 pr_debug("ERROR: Event type is wrong for map %s\n", name);
1779 return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTTYPE;
1780 }
1781
1782 events = xy->entries / (xy->row_size / xy->entry_size);
1783 key = *((unsigned int *)pkey);
1784 if (key >= events) {
1785 pr_debug("ERROR: there is no event %d for map %s\n",
1786 key, name);
1787 return -BPF_LOADER_ERRNO__OBJCONF_MAP_MAPSIZE;
1788 }
1789 evt_fd = xyarray__entry(xy, key, 0);
1790 err = bpf_map_update_elem(map_fd, pkey, evt_fd, BPF_ANY);
1791 if (err && errno)
1792 err = -errno;
1793 return err;
1794}
1795
1796static int
1797apply_obj_config_map_for_key(const char *name, int map_fd,
1798 const struct bpf_map *map,
1799 struct bpf_map_op *op,
1800 void *pkey, void *arg __maybe_unused)
1801{
1802 int err;
1803
1804 switch (op->op_type) {
1805 case BPF_MAP_OP_SET_VALUE:
1806 err = apply_config_value_for_key(map_fd, pkey,
1807 bpf_map__value_size(map),
1808 op->v.value);
1809 break;
1810 case BPF_MAP_OP_SET_EVSEL:
1811 err = apply_config_evsel_for_key(name, map_fd, pkey,
1812 op->v.evsel);
1813 break;
1814 default:
1815 pr_debug("ERROR: unknown value type for '%s'\n", name);
1816 err = -BPF_LOADER_ERRNO__INTERNAL;
1817 }
1818 return err;
1819}
1820
1821static int
1822apply_obj_config_map(struct bpf_map *map)
1823{
1824 return bpf_map_config_foreach_key(map,
1825 apply_obj_config_map_for_key,
1826 NULL);
1827}
1828
1829static int
1830apply_obj_config_object(struct bpf_object *obj)
1831{
1832 struct bpf_map *map;
1833 int err;
1834
1835 bpf_object__for_each_map(map, obj) {
1836 err = apply_obj_config_map(map);
1837 if (err)
1838 return err;
1839 }
1840 return 0;
1841}
1842
1843int bpf__apply_obj_config(void)
1844{
1845 struct bpf_perf_object *perf_obj, *tmp;
1846 int err;
1847
1848 bpf_perf_object__for_each(perf_obj, tmp) {
1849 err = apply_obj_config_object(perf_obj->obj);
1850 if (err)
1851 return err;
1852 }
1853
1854 return 0;
1855}
1856
1857#define bpf__perf_for_each_map(map, pobj, tmp) \
1858 bpf_perf_object__for_each(pobj, tmp) \
1859 bpf_object__for_each_map(map, pobj->obj)
1860
1861#define bpf__perf_for_each_map_named(map, pobj, pobjtmp, name) \
1862 bpf__perf_for_each_map(map, pobj, pobjtmp) \
1863 if (bpf_map__name(map) && (strcmp(name, bpf_map__name(map)) == 0))
1864
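/*
 * Make sure every map called @name in every loaded object has an event
 * wired up: reuse the priv of an already configured map as a template
 * when one exists, otherwise parse a new
 * "bpf-output/no-inherit=1,name=<name>/" evsel and attach it with a
 * SET_EVSEL op.  Returns the newly created evsel, NULL when no new
 * evsel was needed, or an ERR_PTR() on failure.
 */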
1865struct evsel *bpf__setup_output_event(struct evlist *evlist, const char *name)
1866{
1867 struct bpf_map_priv *tmpl_priv = NULL;
1868 struct bpf_perf_object *perf_obj, *tmp;
1869 struct evsel *evsel = NULL;
1870 struct bpf_map *map;
1871 int err;
1872 bool need_init = false;
1873
1874 bpf__perf_for_each_map_named(map, perf_obj, tmp, name) {
1875 struct bpf_map_priv *priv = map_priv(map);
1876
1877 if (IS_ERR(priv))
1878 return ERR_PTR(-BPF_LOADER_ERRNO__INTERNAL);
1879
1880 /*
1881 * No need to check map type: type should have been
1882		 * verified by the kernel.
1883 */
1884 if (!need_init && !priv)
1885 need_init = !priv;
1886 if (!tmpl_priv && priv)
1887 tmpl_priv = priv;
1888 }
1889
1890 if (!need_init)
1891 return NULL;
1892
1893 if (!tmpl_priv) {
1894 char *event_definition = NULL;
1895
1896 if (asprintf(&event_definition, "bpf-output/no-inherit=1,name=%s/", name) < 0)
1897 return ERR_PTR(-ENOMEM);
1898
1899 err = parse_event(evlist, event_definition);
1900 free(event_definition);
1901
1902 if (err) {
1903 pr_debug("ERROR: failed to create the \"%s\" bpf-output event\n", name);
1904 return ERR_PTR(-err);
1905 }
1906
1907 evsel = evlist__last(evlist);
1908 }
1909
1910 bpf__perf_for_each_map_named(map, perf_obj, tmp, name) {
1911 struct bpf_map_priv *priv = map_priv(map);
1912
1913 if (IS_ERR(priv))
1914 return ERR_PTR(-BPF_LOADER_ERRNO__INTERNAL);
1915 if (priv)
1916 continue;
1917
1918 if (tmpl_priv) {
1919 priv = bpf_map_priv__clone(tmpl_priv);
1920 if (!priv)
1921 return ERR_PTR(-ENOMEM);
1922
1923 err = map_set_priv(map, priv);
1924 if (err) {
1925 bpf_map_priv__clear(map, priv);
1926 return ERR_PTR(err);
1927 }
1928 } else if (evsel) {
1929 struct bpf_map_op *op;
1930
1931 op = bpf_map__add_newop(map, NULL);
1932 if (IS_ERR(op))
1933 return ERR_CAST(op);
1934 op->op_type = BPF_MAP_OP_SET_EVSEL;
1935 op->v.evsel = evsel;
1936 }
1937 }
1938
1939 return evsel;
1940}
1941
1942int bpf__setup_stdout(struct evlist *evlist)
1943{
1944 struct evsel *evsel = bpf__setup_output_event(evlist, "__bpf_stdout__");
1945 return PTR_ERR_OR_ZERO(evsel);
1946}
1947
1948#define ERRNO_OFFSET(e) ((e) - __BPF_LOADER_ERRNO__START)
1949#define ERRCODE_OFFSET(c) ERRNO_OFFSET(BPF_LOADER_ERRNO__##c)
1950#define NR_ERRNO (__BPF_LOADER_ERRNO__END - __BPF_LOADER_ERRNO__START)
1951
1952static const char *bpf_loader_strerror_table[NR_ERRNO] = {
1953 [ERRCODE_OFFSET(CONFIG)] = "Invalid config string",
1954 [ERRCODE_OFFSET(GROUP)] = "Invalid group name",
1955 [ERRCODE_OFFSET(EVENTNAME)] = "No event name found in config string",
1956 [ERRCODE_OFFSET(INTERNAL)] = "BPF loader internal error",
1957 [ERRCODE_OFFSET(COMPILE)] = "Error when compiling BPF scriptlet",
1958 [ERRCODE_OFFSET(PROGCONF_TERM)] = "Invalid program config term in config string",
1959 [ERRCODE_OFFSET(PROLOGUE)] = "Failed to generate prologue",
1960 [ERRCODE_OFFSET(PROLOGUE2BIG)] = "Prologue too big for program",
1961	[ERRCODE_OFFSET(PROLOGUEOOB)]	= "Offset out of bounds for prologue",
1962 [ERRCODE_OFFSET(OBJCONF_OPT)] = "Invalid object config option",
1963 [ERRCODE_OFFSET(OBJCONF_CONF)] = "Config value not set (missing '=')",
1964 [ERRCODE_OFFSET(OBJCONF_MAP_OPT)] = "Invalid object map config option",
1965 [ERRCODE_OFFSET(OBJCONF_MAP_NOTEXIST)] = "Target map doesn't exist",
1966 [ERRCODE_OFFSET(OBJCONF_MAP_VALUE)] = "Incorrect value type for map",
1967 [ERRCODE_OFFSET(OBJCONF_MAP_TYPE)] = "Incorrect map type",
1968 [ERRCODE_OFFSET(OBJCONF_MAP_KEYSIZE)] = "Incorrect map key size",
1969 [ERRCODE_OFFSET(OBJCONF_MAP_VALUESIZE)] = "Incorrect map value size",
1970 [ERRCODE_OFFSET(OBJCONF_MAP_NOEVT)] = "Event not found for map setting",
1971 [ERRCODE_OFFSET(OBJCONF_MAP_MAPSIZE)] = "Invalid map size for event setting",
1972 [ERRCODE_OFFSET(OBJCONF_MAP_EVTDIM)] = "Event dimension too large",
1973 [ERRCODE_OFFSET(OBJCONF_MAP_EVTINH)] = "Doesn't support inherit event",
1974 [ERRCODE_OFFSET(OBJCONF_MAP_EVTTYPE)] = "Wrong event type for map",
1975 [ERRCODE_OFFSET(OBJCONF_MAP_IDX2BIG)] = "Index too large",
1976};
1977
1978static int
1979bpf_loader_strerror(int err, char *buf, size_t size)
1980{
1981 char sbuf[STRERR_BUFSIZE];
1982 const char *msg;
1983
1984 if (!buf || !size)
1985 return -1;
1986
1987 err = err > 0 ? err : -err;
1988
1989 if (err >= __LIBBPF_ERRNO__START)
1990 return libbpf_strerror(err, buf, size);
1991
1992 if (err >= __BPF_LOADER_ERRNO__START && err < __BPF_LOADER_ERRNO__END) {
1993 msg = bpf_loader_strerror_table[ERRNO_OFFSET(err)];
1994 snprintf(buf, size, "%s", msg);
1995 buf[size - 1] = '\0';
1996 return 0;
1997 }
1998
1999 if (err >= __BPF_LOADER_ERRNO__END)
2000 snprintf(buf, size, "Unknown bpf loader error %d", err);
2001 else
2002 snprintf(buf, size, "%s",
2003 str_error_r(err, sbuf, sizeof(sbuf)));
2004
2005 buf[size - 1] = '\0';
2006 return -1;
2007}
2008
2009#define bpf__strerror_head(err, buf, size) \
2010 char sbuf[STRERR_BUFSIZE], *emsg;\
2011 if (!size)\
2012 return 0;\
2013 if (err < 0)\
2014 err = -err;\
2015 bpf_loader_strerror(err, sbuf, sizeof(sbuf));\
2016 emsg = sbuf;\
2017 switch (err) {\
2018 default:\
2019 scnprintf(buf, size, "%s", emsg);\
2020 break;
2021
2022#define bpf__strerror_entry(val, fmt...)\
2023 case val: {\
2024 scnprintf(buf, size, fmt);\
2025 break;\
2026 }
2027
2028#define bpf__strerror_end(buf, size)\
2029 }\
2030 buf[size - 1] = '\0';
2031
2032int bpf__strerror_prepare_load(const char *filename, bool source,
2033 int err, char *buf, size_t size)
2034{
2035 size_t n;
2036 int ret;
2037
2038 n = snprintf(buf, size, "Failed to load %s%s: ",
2039 filename, source ? " from source" : "");
2040 if (n >= size) {
2041 buf[size - 1] = '\0';
2042 return 0;
2043 }
2044 buf += n;
2045 size -= n;
2046
2047 ret = bpf_loader_strerror(err, buf, size);
2048 buf[size - 1] = '\0';
2049 return ret;
2050}
2051
2052int bpf__strerror_probe(struct bpf_object *obj __maybe_unused,
2053 int err, char *buf, size_t size)
2054{
2055 bpf__strerror_head(err, buf, size);
2056 case BPF_LOADER_ERRNO__PROGCONF_TERM: {
2057 scnprintf(buf, size, "%s (add -v to see detail)", emsg);
2058 break;
2059 }
2060	bpf__strerror_entry(EEXIST, "Probe point exists. Try 'perf probe -d \"*\"' and set 'force=yes'");
2061 bpf__strerror_entry(EACCES, "You need to be root");
2062 bpf__strerror_entry(EPERM, "You need to be root, and /proc/sys/kernel/kptr_restrict should be 0");
2063 bpf__strerror_entry(ENOENT, "You need to check probing points in BPF file");
2064 bpf__strerror_end(buf, size);
2065 return 0;
2066}
2067
2068int bpf__strerror_load(struct bpf_object *obj,
2069 int err, char *buf, size_t size)
2070{
2071 bpf__strerror_head(err, buf, size);
2072 case LIBBPF_ERRNO__KVER: {
2073 unsigned int obj_kver = bpf_object__kversion(obj);
2074 unsigned int real_kver;
2075
2076 if (fetch_kernel_version(&real_kver, NULL, 0)) {
2077 scnprintf(buf, size, "Unable to fetch kernel version");
2078 break;
2079 }
2080
2081 if (obj_kver != real_kver) {
2082 scnprintf(buf, size,
2083 "'version' ("KVER_FMT") doesn't match running kernel ("KVER_FMT")",
2084 KVER_PARAM(obj_kver),
2085 KVER_PARAM(real_kver));
2086 break;
2087 }
2088
2089 scnprintf(buf, size, "Failed to load program for unknown reason");
2090 break;
2091 }
2092 bpf__strerror_end(buf, size);
2093 return 0;
2094}
2095
2096int bpf__strerror_config_obj(struct bpf_object *obj __maybe_unused,
2097 struct parse_events_term *term __maybe_unused,
2098 struct evlist *evlist __maybe_unused,
2099 int *error_pos __maybe_unused, int err,
2100 char *buf, size_t size)
2101{
2102 bpf__strerror_head(err, buf, size);
2103 bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE,
2104 "Can't use this config term with this map type");
2105 bpf__strerror_end(buf, size);
2106 return 0;
2107}
2108
2109int bpf__strerror_apply_obj_config(int err, char *buf, size_t size)
2110{
2111 bpf__strerror_head(err, buf, size);
2112 bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTDIM,
2113 "Cannot set event to BPF map in multi-thread tracing");
2114 bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTINH,
2115 "%s (Hint: use -i to turn off inherit)", emsg);
2116 bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTTYPE,
2117 "Can only put raw, hardware and BPF output event into a BPF map");
2118 bpf__strerror_end(buf, size);
2119 return 0;
2120}
2121
2122int bpf__strerror_setup_output_event(struct evlist *evlist __maybe_unused,
2123 int err, char *buf, size_t size)
2124{
2125 bpf__strerror_head(err, buf, size);
2126 bpf__strerror_end(buf, size);
2127 return 0;
2128}