1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __UNWIND_H
3#define __UNWIND_H
4
5#include <linux/compiler.h>
6#include <linux/types.h>
7
8struct map;
9struct map_groups;
10struct perf_sample;
11struct symbol;
12struct thread;
13
14struct unwind_entry {
15 struct map *map;
16 struct symbol *sym;
17 u64 ip;
18};
19
20typedef int (*unwind_entry_cb_t)(struct unwind_entry *entry, void *arg);
21
22struct unwind_libunwind_ops {
23 int (*prepare_access)(struct map_groups *mg);
24 void (*flush_access)(struct map_groups *mg);
25 void (*finish_access)(struct map_groups *mg);
26 int (*get_entries)(unwind_entry_cb_t cb, void *arg,
27 struct thread *thread,
28 struct perf_sample *data, int max_stack);
29};
30
31#ifdef HAVE_DWARF_UNWIND_SUPPORT
32int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
33 struct thread *thread,
34 struct perf_sample *data, int max_stack);
35/* libunwind specific */
36#ifdef HAVE_LIBUNWIND_SUPPORT
37#ifndef LIBUNWIND__ARCH_REG_ID
38#define LIBUNWIND__ARCH_REG_ID(regnum) libunwind__arch_reg_id(regnum)
39#endif
40
41#ifndef LIBUNWIND__ARCH_REG_SP
42#define LIBUNWIND__ARCH_REG_SP PERF_REG_SP
43#endif
44
45#ifndef LIBUNWIND__ARCH_REG_IP
46#define LIBUNWIND__ARCH_REG_IP PERF_REG_IP
47#endif
48
49int LIBUNWIND__ARCH_REG_ID(int regnum);
50int unwind__prepare_access(struct map_groups *mg, struct map *map,
51 bool *initialized);
52void unwind__flush_access(struct map_groups *mg);
53void unwind__finish_access(struct map_groups *mg);
54#else
55static inline int unwind__prepare_access(struct map_groups *mg __maybe_unused,
56 struct map *map __maybe_unused,
57 bool *initialized __maybe_unused)
58{
59 return 0;
60}
61
62static inline void unwind__flush_access(struct map_groups *mg __maybe_unused) {}
63static inline void unwind__finish_access(struct map_groups *mg __maybe_unused) {}
64#endif
65#else
66static inline int
67unwind__get_entries(unwind_entry_cb_t cb __maybe_unused,
68 void *arg __maybe_unused,
69 struct thread *thread __maybe_unused,
70 struct perf_sample *data __maybe_unused,
71 int max_stack __maybe_unused)
72{
73 return 0;
74}
75
76static inline int unwind__prepare_access(struct map_groups *mg __maybe_unused,
77 struct map *map __maybe_unused,
78 bool *initialized __maybe_unused)
79{
80 return 0;
81}
82
83static inline void unwind__flush_access(struct map_groups *mg __maybe_unused) {}
84static inline void unwind__finish_access(struct map_groups *mg __maybe_unused) {}
85#endif /* HAVE_DWARF_UNWIND_SUPPORT */
86#endif /* __UNWIND_H */
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __UNWIND_H
3#define __UNWIND_H
4
5#include <linux/compiler.h>
6#include <linux/types.h>
7#include "util/map_symbol.h"
8
9struct maps;
10struct perf_sample;
11struct thread;
12
13struct unwind_entry {
14 struct map_symbol ms;
15 u64 ip;
16};
17
18typedef int (*unwind_entry_cb_t)(struct unwind_entry *entry, void *arg);
19
20struct unwind_libunwind_ops {
21 int (*prepare_access)(struct maps *maps);
22 void (*flush_access)(struct maps *maps);
23 void (*finish_access)(struct maps *maps);
24 int (*get_entries)(unwind_entry_cb_t cb, void *arg,
25 struct thread *thread,
26 struct perf_sample *data, int max_stack, bool best_effort);
27};
28
29#ifdef HAVE_DWARF_UNWIND_SUPPORT
30/*
31 * When best_effort is set, don't report errors and fail silently. This could
32 * be expanded in the future to be more permissive about things other than
33 * error messages.
34 */
35int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
36 struct thread *thread,
37 struct perf_sample *data, int max_stack,
38 bool best_effort);
39/* libunwind specific */
40#ifdef HAVE_LIBUNWIND_SUPPORT
41#ifndef LIBUNWIND__ARCH_REG_ID
42#define LIBUNWIND__ARCH_REG_ID(regnum) libunwind__arch_reg_id(regnum)
43#endif
44
45int LIBUNWIND__ARCH_REG_ID(int regnum);
46int unwind__prepare_access(struct maps *maps, struct map *map, bool *initialized);
47void unwind__flush_access(struct maps *maps);
48void unwind__finish_access(struct maps *maps);
49#else
50static inline int unwind__prepare_access(struct maps *maps __maybe_unused,
51 struct map *map __maybe_unused,
52 bool *initialized __maybe_unused)
53{
54 return 0;
55}
56
57static inline void unwind__flush_access(struct maps *maps __maybe_unused) {}
58static inline void unwind__finish_access(struct maps *maps __maybe_unused) {}
59#endif
60#else
61static inline int
62unwind__get_entries(unwind_entry_cb_t cb __maybe_unused,
63 void *arg __maybe_unused,
64 struct thread *thread __maybe_unused,
65 struct perf_sample *data __maybe_unused,
66 int max_stack __maybe_unused,
67 bool best_effort __maybe_unused)
68{
69 return 0;
70}
71
72static inline int unwind__prepare_access(struct maps *maps __maybe_unused,
73 struct map *map __maybe_unused,
74 bool *initialized __maybe_unused)
75{
76 return 0;
77}
78
79static inline void unwind__flush_access(struct maps *maps __maybe_unused) {}
80static inline void unwind__finish_access(struct maps *maps __maybe_unused) {}
81#endif /* HAVE_DWARF_UNWIND_SUPPORT */
82#endif /* __UNWIND_H */