// SPDX-License-Identifier: GPL-2.0
#include "unwind.h"
#include "dso.h"
#include "map.h"
#include "thread.h"
#include "session.h"
#include "debug.h"
#include "env.h"
#include "callchain.h"

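/*
 * Weak, NULL-by-default ops pointers: each is overridden by the matching
 * libunwind support code when it is compiled in, so that 32-bit x86 and
 * 64-bit ARM targets can be unwound even when recorded on another host.
 */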
struct unwind_libunwind_ops __weak *local_unwind_libunwind_ops;
struct unwind_libunwind_ops __weak *x86_32_unwind_libunwind_ops;
struct unwind_libunwind_ops __weak *arm64_unwind_libunwind_ops;

static void unwind__register_ops(struct map_groups *mg,
				 struct unwind_libunwind_ops *ops)
{
	mg->unwind_libunwind_ops = ops;
}

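/*
 * Select the libunwind ops matching the target: the local ops by default,
 * or the 32-bit x86 / 64-bit ARM ops when the recorded environment and the
 * DSO's bitness call for them, then let the chosen ops prepare their
 * per-thread unwind state.
 */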
int unwind__prepare_access(struct map_groups *mg, struct map *map,
			   bool *initialized)
{
	const char *arch;
	enum dso_type dso_type;
	struct unwind_libunwind_ops *ops = local_unwind_libunwind_ops;
	int err;

	if (!dwarf_callchain_users)
		return 0;

	if (mg->addr_space) {
		pr_debug("unwind: thread map already set, dso=%s\n",
			 map->dso->name);
		if (initialized)
			*initialized = true;
		return 0;
	}

	/* env->arch is NULL for live-mode (i.e. perf top) */
	if (!mg->machine->env || !mg->machine->env->arch)
		goto out_register;

	dso_type = dso__type(map->dso, mg->machine);
	if (dso_type == DSO__TYPE_UNKNOWN)
		return 0;

	arch = perf_env__arch(mg->machine->env);

	if (!strcmp(arch, "x86")) {
		if (dso_type != DSO__TYPE_64BIT)
			ops = x86_32_unwind_libunwind_ops;
	} else if (!strcmp(arch, "arm64") || !strcmp(arch, "arm")) {
		if (dso_type == DSO__TYPE_64BIT)
			ops = arm64_unwind_libunwind_ops;
	}

	if (!ops) {
		pr_err("unwind: target platform=%s is not supported\n", arch);
		return 0;
	}
out_register:
	unwind__register_ops(mg, ops);

	err = mg->unwind_libunwind_ops->prepare_access(mg);
	if (initialized)
		*initialized = err ? false : true;
	return err;
}

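/* Drop any state the registered unwinder has cached for these map groups. */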
void unwind__flush_access(struct map_groups *mg)
{
	if (mg->unwind_libunwind_ops)
		mg->unwind_libunwind_ops->flush_access(mg);
}

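/* Tear down what unwind__prepare_access() set up for these map groups. */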
void unwind__finish_access(struct map_groups *mg)
{
	if (mg->unwind_libunwind_ops)
		mg->unwind_libunwind_ops->finish_access(mg);
}

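/*
 * Walk the sample's callchain via the registered unwinder, invoking @cb for
 * each entry; returns 0 immediately when no unwinder was registered.
 */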
int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
			struct thread *thread,
			struct perf_sample *data, int max_stack)
{
	if (thread->mg->unwind_libunwind_ops)
		return thread->mg->unwind_libunwind_ops->get_entries(cb, arg, thread, data, max_stack);
	return 0;
}
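
/*
 * A later revision of the same file follows: the map_groups API has been
 * replaced by the maps accessors and unwind__get_entries() gained a
 * best_effort flag.
 */
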
// SPDX-License-Identifier: GPL-2.0
#include "unwind.h"
#include "dso.h"
#include "map.h"
#include "thread.h"
#include "session.h"
#include "debug.h"
#include "env.h"
#include "callchain.h"

struct unwind_libunwind_ops __weak *local_unwind_libunwind_ops;
struct unwind_libunwind_ops __weak *x86_32_unwind_libunwind_ops;
struct unwind_libunwind_ops __weak *arm64_unwind_libunwind_ops;

int unwind__prepare_access(struct maps *maps, struct map *map, bool *initialized)
{
	const char *arch;
	enum dso_type dso_type;
	struct unwind_libunwind_ops *ops = local_unwind_libunwind_ops;
	struct dso *dso = map__dso(map);
	struct machine *machine;
	int err;

	if (!dwarf_callchain_users)
		return 0;

	if (maps__addr_space(maps)) {
		pr_debug("unwind: thread map already set, dso=%s\n", dso__name(dso));
		if (initialized)
			*initialized = true;
		return 0;
	}

	machine = maps__machine(maps);
	/* env->arch is NULL for live-mode (i.e. perf top) */
	if (!machine->env || !machine->env->arch)
		goto out_register;

	dso_type = dso__type(dso, machine);
	if (dso_type == DSO__TYPE_UNKNOWN)
		return 0;

	arch = perf_env__arch(machine->env);

	if (!strcmp(arch, "x86")) {
		if (dso_type != DSO__TYPE_64BIT)
			ops = x86_32_unwind_libunwind_ops;
	} else if (!strcmp(arch, "arm64") || !strcmp(arch, "arm")) {
		if (dso_type == DSO__TYPE_64BIT)
			ops = arm64_unwind_libunwind_ops;
	}

	if (!ops) {
		pr_warning_once("unwind: target platform=%s is not supported\n", arch);
		return 0;
	}
out_register:
	maps__set_unwind_libunwind_ops(maps, ops);

	err = maps__unwind_libunwind_ops(maps)->prepare_access(maps);
	if (initialized)
		*initialized = err ? false : true;
	return err;
}

void unwind__flush_access(struct maps *maps)
{
	const struct unwind_libunwind_ops *ops = maps__unwind_libunwind_ops(maps);

	if (ops)
		ops->flush_access(maps);
}

void unwind__finish_access(struct maps *maps)
{
	const struct unwind_libunwind_ops *ops = maps__unwind_libunwind_ops(maps);

	if (ops)
		ops->finish_access(maps);
}

int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
			struct thread *thread,
			struct perf_sample *data, int max_stack,
			bool best_effort)
{
	const struct unwind_libunwind_ops *ops = maps__unwind_libunwind_ops(thread__maps(thread));

	if (ops)
		return ops->get_entries(cb, arg, thread, data, max_stack, best_effort);
	return 0;
}