// SPDX-License-Identifier: GPL-2.0
#include "unwind.h"
#include "dso.h"
#include "map.h"
#include "thread.h"
#include "session.h"
#include "debug.h"
#include "env.h"
#include "callchain.h"

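/*
 * Weak, NULL-by-default ops pointers. A strong definition is provided
 * elsewhere when the matching libunwind support is compiled in: the
 * local unwinder for the host, plus optional remote unwinders for
 * 32-bit x86 and 64-bit arm targets. A pointer left NULL means this
 * build of perf cannot unwind that kind of target.
 */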
struct unwind_libunwind_ops __weak *local_unwind_libunwind_ops;
struct unwind_libunwind_ops __weak *x86_32_unwind_libunwind_ops;
struct unwind_libunwind_ops __weak *arm64_unwind_libunwind_ops;

static void unwind__register_ops(struct map_groups *mg,
                                 struct unwind_libunwind_ops *ops)
{
        mg->unwind_libunwind_ops = ops;
}

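/*
 * Pick and register the libunwind ops for this thread's map group: the
 * local (host) unwinder is the default, and the 32-bit x86 or 64-bit
 * arm remote unwinder is chosen instead when the recorded architecture
 * and the DSO's bitness call for it. The selected ops' prepare_access
 * hook is then run once per map group.
 *
 * Sketch of a typical call site (illustrative only, assuming a caller
 * that inserts a new map into a thread's map group):
 *
 *	bool initialized = false;
 *
 *	if (unwind__prepare_access(thread->mg, map, &initialized) < 0)
 *		return -1;
 */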
int unwind__prepare_access(struct map_groups *mg, struct map *map,
                           bool *initialized)
{
        const char *arch;
        enum dso_type dso_type;
        struct unwind_libunwind_ops *ops = local_unwind_libunwind_ops;
        int err;

        if (!dwarf_callchain_users)
                return 0;

        if (mg->addr_space) {
                pr_debug("unwind: thread map already set, dso=%s\n",
                         map->dso->name);
                if (initialized)
                        *initialized = true;
                return 0;
        }

        /* env->arch is NULL for live-mode (i.e. perf top) */
        if (!mg->machine->env || !mg->machine->env->arch)
                goto out_register;

        dso_type = dso__type(map->dso, mg->machine);
        if (dso_type == DSO__TYPE_UNKNOWN)
                return 0;

        arch = perf_env__arch(mg->machine->env);

        if (!strcmp(arch, "x86")) {
                if (dso_type != DSO__TYPE_64BIT)
                        ops = x86_32_unwind_libunwind_ops;
        } else if (!strcmp(arch, "arm64") || !strcmp(arch, "arm")) {
                if (dso_type == DSO__TYPE_64BIT)
                        ops = arm64_unwind_libunwind_ops;
        }

        if (!ops) {
                pr_err("unwind: target platform=%s is not supported\n", arch);
                return 0;
        }
out_register:
        unwind__register_ops(mg, ops);

        err = mg->unwind_libunwind_ops->prepare_access(mg);
        if (initialized)
                *initialized = err ? false : true;
        return err;
}

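/* Drop any cached unwind state via the registered ops, if present. */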
void unwind__flush_access(struct map_groups *mg)
{
        if (mg->unwind_libunwind_ops)
                mg->unwind_libunwind_ops->flush_access(mg);
}

void unwind__finish_access(struct map_groups *mg)
{
        if (mg->unwind_libunwind_ops)
                mg->unwind_libunwind_ops->finish_access(mg);
}

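/*
 * Walk the DWARF-based call chain for @thread's sample through the
 * registered ops; when no ops were registered (no libunwind support or
 * no prepared access), no entries are produced and 0 is returned.
 */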
int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
                        struct thread *thread,
                        struct perf_sample *data, int max_stack)
{
        if (thread->mg->unwind_libunwind_ops)
                return thread->mg->unwind_libunwind_ops->get_entries(cb, arg, thread, data, max_stack);
        return 0;
}

// SPDX-License-Identifier: GPL-2.0
#include "unwind.h"
#include "dso.h"
#include "map.h"
#include "thread.h"
#include "session.h"
#include "debug.h"
#include "env.h"
#include "callchain.h"

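/*
 * Weak, NULL-by-default ops pointers. A strong definition is provided
 * elsewhere when the matching libunwind support is compiled in: the
 * local unwinder for the host, plus optional remote unwinders for
 * 32-bit x86 and 64-bit arm targets. A pointer left NULL means this
 * build of perf cannot unwind that kind of target.
 */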
struct unwind_libunwind_ops __weak *local_unwind_libunwind_ops;
struct unwind_libunwind_ops __weak *x86_32_unwind_libunwind_ops;
struct unwind_libunwind_ops __weak *arm64_unwind_libunwind_ops;

static void unwind__register_ops(struct maps *maps, struct unwind_libunwind_ops *ops)
{
        maps->unwind_libunwind_ops = ops;
}

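/*
 * Pick and register the libunwind ops for this thread's maps: the local
 * (host) unwinder is the default, and the 32-bit x86 or 64-bit arm
 * remote unwinder is chosen instead when the recorded architecture and
 * the DSO's bitness call for it. The selected ops' prepare_access hook
 * is then run once per maps.
 *
 * Sketch of a typical call site (illustrative only, assuming a caller
 * that inserts a new map into a thread's maps):
 *
 *	bool initialized = false;
 *
 *	if (unwind__prepare_access(thread->maps, map, &initialized) < 0)
 *		return -1;
 */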
int unwind__prepare_access(struct maps *maps, struct map *map, bool *initialized)
{
        const char *arch;
        enum dso_type dso_type;
        struct unwind_libunwind_ops *ops = local_unwind_libunwind_ops;
        int err;

        if (!dwarf_callchain_users)
                return 0;

        if (maps->addr_space) {
                pr_debug("unwind: thread map already set, dso=%s\n",
                         map->dso->name);
                if (initialized)
                        *initialized = true;
                return 0;
        }

        /* env->arch is NULL for live-mode (i.e. perf top) */
        if (!maps->machine->env || !maps->machine->env->arch)
                goto out_register;

        dso_type = dso__type(map->dso, maps->machine);
        if (dso_type == DSO__TYPE_UNKNOWN)
                return 0;

        arch = perf_env__arch(maps->machine->env);

        if (!strcmp(arch, "x86")) {
                if (dso_type != DSO__TYPE_64BIT)
                        ops = x86_32_unwind_libunwind_ops;
        } else if (!strcmp(arch, "arm64") || !strcmp(arch, "arm")) {
                if (dso_type == DSO__TYPE_64BIT)
                        ops = arm64_unwind_libunwind_ops;
        }

        if (!ops) {
                pr_err("unwind: target platform=%s is not supported\n", arch);
                return 0;
        }
out_register:
        unwind__register_ops(maps, ops);

        err = maps->unwind_libunwind_ops->prepare_access(maps);
        if (initialized)
                *initialized = err ? false : true;
        return err;
}

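/* Drop any cached unwind state via the registered ops, if present. */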
void unwind__flush_access(struct maps *maps)
{
        if (maps->unwind_libunwind_ops)
                maps->unwind_libunwind_ops->flush_access(maps);
}

void unwind__finish_access(struct maps *maps)
{
        if (maps->unwind_libunwind_ops)
                maps->unwind_libunwind_ops->finish_access(maps);
}

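/*
 * Walk the DWARF-based call chain for @thread's sample through the
 * registered ops. The best_effort flag is forwarded to the ops and is
 * expected to suppress error reporting when a failed unwind is
 * acceptable (e.g. when DWARF data merely augments another callchain
 * source); when no ops were registered, no entries are produced and 0
 * is returned.
 */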
int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
                        struct thread *thread,
                        struct perf_sample *data, int max_stack,
                        bool best_effort)
{
        if (thread->maps->unwind_libunwind_ops)
                return thread->maps->unwind_libunwind_ops->get_entries(cb, arg, thread, data,
                                                                       max_stack, best_effort);
        return 0;
}