1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __PERF_ENV_H
3#define __PERF_ENV_H
4
5#include <linux/types.h>
6#include <linux/rbtree.h>
7#include "cpumap.h"
8#include "rwsem.h"
9
10struct perf_cpu_map;
11
/* Per-CPU physical topology identifiers (socket/die/core). */
struct cpu_topology_map {
	int	socket_id;
	int	die_id;
	int	core_id;
};

18struct cpu_cache_level {
19 u32 level;
20 u32 line_size;
21 u32 sets;
22 u32 ways;
23 char *type;
24 char *size;
25 char *map;
26};
27
28struct numa_node {
29 u32 node;
30 u64 mem_total;
31 u64 mem_free;
32 struct perf_cpu_map *map;
33};
34
35struct memory_node {
36 u64 node;
37 u64 size;
38 unsigned long *set;
39};
40
/* One hybrid PMU (e.g. a core-type PMU) and the CPU list string it covers. */
struct hybrid_node {
	char	*pmu_name;
	char	*cpus;
};

/* Capability strings (and count) recorded for one PMU. */
struct pmu_caps {
	int		nr_caps;
	unsigned int	max_branches;
	char		**caps;
	char		*pmu_name;
};

53struct perf_env {
54 char *hostname;
55 char *os_release;
56 char *version;
57 char *arch;
58 int nr_cpus_online;
59 int nr_cpus_avail;
60 char *cpu_desc;
61 char *cpuid;
62 unsigned long long total_mem;
63 unsigned int msr_pmu_type;
64 unsigned int max_branches;
65 int kernel_is_64_bit;
66
67 int nr_cmdline;
68 int nr_sibling_cores;
69 int nr_sibling_dies;
70 int nr_sibling_threads;
71 int nr_numa_nodes;
72 int nr_memory_nodes;
73 int nr_pmu_mappings;
74 int nr_groups;
75 int nr_cpu_pmu_caps;
76 int nr_hybrid_nodes;
77 int nr_pmus_with_caps;
78 char *cmdline;
79 const char **cmdline_argv;
80 char *sibling_cores;
81 char *sibling_dies;
82 char *sibling_threads;
83 char *pmu_mappings;
84 char **cpu_pmu_caps;
85 struct cpu_topology_map *cpu;
86 struct cpu_cache_level *caches;
87 int caches_cnt;
88 u32 comp_ratio;
89 u32 comp_ver;
90 u32 comp_type;
91 u32 comp_level;
92 u32 comp_mmap_len;
93 struct numa_node *numa_nodes;
94 struct memory_node *memory_nodes;
95 unsigned long long memory_bsize;
96 struct hybrid_node *hybrid_nodes;
97 struct pmu_caps *pmu_caps;
98#ifdef HAVE_LIBBPF_SUPPORT
99 /*
100 * bpf_info_lock protects bpf rbtrees. This is needed because the
101 * trees are accessed by different threads in perf-top
102 */
103 struct {
104 struct rw_semaphore lock;
105 struct rb_root infos;
106 u32 infos_cnt;
107 struct rb_root btfs;
108 u32 btfs_cnt;
109 } bpf_progs;
110#endif // HAVE_LIBBPF_SUPPORT
111 /* same reason as above (for perf-top) */
112 struct {
113 struct rw_semaphore lock;
114 struct rb_root tree;
115 } cgroups;
116
117 /* For fast cpu to numa node lookup via perf_env__numa_node */
118 int *numa_map;
119 int nr_numa_map;
120
121 /* For real clock time reference. */
122 struct {
123 u64 tod_ns;
124 u64 clockid_ns;
125 u64 clockid_res_ns;
126 int clockid;
127 /*
128 * enabled is valid for report mode, and is true if above
129 * values are set, it's set in process_clock_data
130 */
131 bool enabled;
132 } clock;
133};
134
/* Compression method recorded in perf_env::comp_type. */
enum perf_compress_type {
	PERF_COMP_NONE = 0,
	PERF_COMP_ZSTD,
	PERF_COMP_MAX
};

141struct bpf_prog_info_node;
142struct btf_node;
143
144extern struct perf_env perf_env;
145
146void perf_env__exit(struct perf_env *env);
147
148int perf_env__kernel_is_64_bit(struct perf_env *env);
149
150int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[]);
151
152int perf_env__read_cpuid(struct perf_env *env);
153int perf_env__read_pmu_mappings(struct perf_env *env);
154int perf_env__nr_pmu_mappings(struct perf_env *env);
155const char *perf_env__pmu_mappings(struct perf_env *env);
156
157int perf_env__read_cpu_topology_map(struct perf_env *env);
158
159void cpu_cache_level__free(struct cpu_cache_level *cache);
160
161const char *perf_env__arch(struct perf_env *env);
162const char *perf_env__cpuid(struct perf_env *env);
163const char *perf_env__raw_arch(struct perf_env *env);
164int perf_env__nr_cpus_avail(struct perf_env *env);
165
166void perf_env__init(struct perf_env *env);
167void perf_env__insert_bpf_prog_info(struct perf_env *env,
168 struct bpf_prog_info_node *info_node);
169struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
170 __u32 prog_id);
171bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
172struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id);
173
174int perf_env__numa_node(struct perf_env *env, struct perf_cpu cpu);
175char *perf_env__find_pmu_cap(struct perf_env *env, const char *pmu_name,
176 const char *cap);
177#endif /* __PERF_ENV_H */
1/* SPDX-License-Identifier: GPL-2.0 */
2#ifndef __PERF_ENV_H
3#define __PERF_ENV_H
4
5#include <linux/types.h>
6#include <linux/rbtree.h>
7#include "cpumap.h"
8#include "rwsem.h"
9
10struct perf_cpu_map;
11
/* Per-CPU physical topology identifiers (socket/die/cluster/core). */
struct cpu_topology_map {
	int	socket_id;
	int	die_id;
	int	cluster_id;
	int	core_id;
};

19struct cpu_cache_level {
20 u32 level;
21 u32 line_size;
22 u32 sets;
23 u32 ways;
24 char *type;
25 char *size;
26 char *map;
27};
28
29struct numa_node {
30 u32 node;
31 u64 mem_total;
32 u64 mem_free;
33 struct perf_cpu_map *map;
34};
35
36struct memory_node {
37 u64 node;
38 u64 size;
39 unsigned long *set;
40};
41
/* One hybrid PMU (e.g. a core-type PMU) and the CPU list string it covers. */
struct hybrid_node {
	char	*pmu_name;
	char	*cpus;
};

/*
 * Capability strings (and count) recorded for one PMU, plus branch-stack
 * limits including branch-counter logging width/number.
 */
struct pmu_caps {
	int		nr_caps;
	unsigned int	max_branches;
	unsigned int	br_cntr_nr;
	unsigned int	br_cntr_width;

	char		**caps;
	char		*pmu_name;
};

/* Per-arch errno-to-name translator, looked up by architecture string. */
typedef const char *(arch_syscalls__strerrno_t)(int err);

arch_syscalls__strerrno_t *arch_syscalls__strerrno_function(const char *arch);

61struct perf_env {
62 char *hostname;
63 char *os_release;
64 char *version;
65 char *arch;
66 int nr_cpus_online;
67 int nr_cpus_avail;
68 char *cpu_desc;
69 char *cpuid;
70 unsigned long long total_mem;
71 unsigned int msr_pmu_type;
72 unsigned int max_branches;
73 unsigned int br_cntr_nr;
74 unsigned int br_cntr_width;
75 int kernel_is_64_bit;
76
77 int nr_cmdline;
78 int nr_sibling_cores;
79 int nr_sibling_dies;
80 int nr_sibling_threads;
81 int nr_numa_nodes;
82 int nr_memory_nodes;
83 int nr_pmu_mappings;
84 int nr_groups;
85 int nr_cpu_pmu_caps;
86 int nr_hybrid_nodes;
87 int nr_pmus_with_caps;
88 char *cmdline;
89 const char **cmdline_argv;
90 char *sibling_cores;
91 char *sibling_dies;
92 char *sibling_threads;
93 char *pmu_mappings;
94 char **cpu_pmu_caps;
95 struct cpu_topology_map *cpu;
96 struct cpu_cache_level *caches;
97 int caches_cnt;
98 u32 comp_ratio;
99 u32 comp_ver;
100 u32 comp_type;
101 u32 comp_level;
102 u32 comp_mmap_len;
103 struct numa_node *numa_nodes;
104 struct memory_node *memory_nodes;
105 unsigned long long memory_bsize;
106 struct hybrid_node *hybrid_nodes;
107 struct pmu_caps *pmu_caps;
108#ifdef HAVE_LIBBPF_SUPPORT
109 /*
110 * bpf_info_lock protects bpf rbtrees. This is needed because the
111 * trees are accessed by different threads in perf-top
112 */
113 struct {
114 struct rw_semaphore lock;
115 struct rb_root infos;
116 u32 infos_cnt;
117 struct rb_root btfs;
118 u32 btfs_cnt;
119 } bpf_progs;
120#endif // HAVE_LIBBPF_SUPPORT
121 /* same reason as above (for perf-top) */
122 struct {
123 struct rw_semaphore lock;
124 struct rb_root tree;
125 } cgroups;
126
127 /* For fast cpu to numa node lookup via perf_env__numa_node */
128 int *numa_map;
129 int nr_numa_map;
130
131 /* For real clock time reference. */
132 struct {
133 u64 tod_ns;
134 u64 clockid_ns;
135 u64 clockid_res_ns;
136 int clockid;
137 /*
138 * enabled is valid for report mode, and is true if above
139 * values are set, it's set in process_clock_data
140 */
141 bool enabled;
142 } clock;
143 arch_syscalls__strerrno_t *arch_strerrno;
144};
145
/* Compression method recorded in perf_env::comp_type. */
enum perf_compress_type {
	PERF_COMP_NONE = 0,
	PERF_COMP_ZSTD,
	PERF_COMP_MAX
};

152struct bpf_prog_info_node;
153struct btf_node;
154
155extern struct perf_env perf_env;
156
157void perf_env__exit(struct perf_env *env);
158
159int perf_env__kernel_is_64_bit(struct perf_env *env);
160
161int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[]);
162
163int perf_env__read_cpuid(struct perf_env *env);
164int perf_env__read_pmu_mappings(struct perf_env *env);
165int perf_env__nr_pmu_mappings(struct perf_env *env);
166const char *perf_env__pmu_mappings(struct perf_env *env);
167
168int perf_env__read_cpu_topology_map(struct perf_env *env);
169
170void cpu_cache_level__free(struct cpu_cache_level *cache);
171
172const char *perf_env__arch(struct perf_env *env);
173const char *perf_env__arch_strerrno(struct perf_env *env, int err);
174const char *perf_env__cpuid(struct perf_env *env);
175const char *perf_env__raw_arch(struct perf_env *env);
176int perf_env__nr_cpus_avail(struct perf_env *env);
177
178void perf_env__init(struct perf_env *env);
179bool __perf_env__insert_bpf_prog_info(struct perf_env *env,
180 struct bpf_prog_info_node *info_node);
181bool perf_env__insert_bpf_prog_info(struct perf_env *env,
182 struct bpf_prog_info_node *info_node);
183struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
184 __u32 prog_id);
185bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
186bool __perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
187struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id);
188struct btf_node *__perf_env__find_btf(struct perf_env *env, __u32 btf_id);
189
190int perf_env__numa_node(struct perf_env *env, struct perf_cpu cpu);
191char *perf_env__find_pmu_cap(struct perf_env *env, const char *pmu_name,
192 const char *cap);
193
194bool perf_env__has_pmu_mapping(struct perf_env *env, const char *pmu_name);
195void perf_env__find_br_cntr_info(struct perf_env *env,
196 unsigned int *nr,
197 unsigned int *width);
198
199bool x86__is_amd_cpu(void);
200bool perf_env__is_x86_amd_cpu(struct perf_env *env);
201
202#endif /* __PERF_ENV_H */