/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PERF_ENV_H
#define __PERF_ENV_H

#include <linux/types.h>
#include <linux/rbtree.h>
#include "cpumap.h"
#include "rwsem.h"

struct perf_cpu_map;

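/*
 * Topology identifiers for one logical CPU as recorded in the perf.data
 * header; perf_env::cpu is an array of these indexed by CPU number.
 */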
struct cpu_topology_map {
	int	socket_id;
	int	die_id;
	int	cluster_id;
	int	core_id;
};

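/*
 * One CPU cache level as recorded in the perf.data header: the cache type
 * (e.g. "Data", "Instruction", "Unified"), a human-readable size string
 * and the map of CPUs sharing the cache.
 */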
struct cpu_cache_level {
	u32	level;
	u32	line_size;
	u32	sets;
	u32	ways;
	char	*type;
	char	*size;
	char	*map;
};

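/* Total and free memory plus the CPU map of one NUMA node. */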
struct numa_node {
	u32			node;
	u64			mem_total;
	u64			mem_free;
	struct perf_cpu_map	*map;
};

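/*
 * One node of the memory topology. As far as the recorded feature goes,
 * 'set' appears to be a bitmap of the memory blocks present on the node,
 * each block being perf_env::memory_bsize bytes.
 */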
struct memory_node {
	u64		node;
	u64		size;
	unsigned long	*set;
};

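/*
 * Name and CPU list of one hybrid core PMU (e.g. cpu_core/cpu_atom on
 * Intel hybrid systems).
 */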
struct hybrid_node {
	char	*pmu_name;
	char	*cpus;
};

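/*
 * Capabilities of one PMU: 'caps' holds nr_caps "name=value" strings;
 * the branch counter and maximum branch values are also cached here for
 * quick access.
 */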
struct pmu_caps {
	int		nr_caps;
	unsigned int	max_branches;
	unsigned int	br_cntr_nr;
	unsigned int	br_cntr_width;

	char		**caps;
	char		*pmu_name;
};

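/*
 * Maps an errno value to its symbolic name (e.g. "ENOENT") for a given
 * architecture; arch_syscalls__strerrno_function() returns the helper
 * matching the architecture name.
 */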
typedef const char *(arch_syscalls__strerrno_t)(int err);

arch_syscalls__strerrno_t *arch_syscalls__strerrno_function(const char *arch);

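/*
 * Snapshot of the environment a perf session ran in, populated either from
 * the perf.data feature headers (report mode) or from the running system.
 */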
struct perf_env {
	char *hostname;
	char *os_release;
	char *version;
	char *arch;
	int nr_cpus_online;
	int nr_cpus_avail;
	char *cpu_desc;
	char *cpuid;
	unsigned long long total_mem;
	unsigned int msr_pmu_type;
	unsigned int max_branches;
	unsigned int br_cntr_nr;
	unsigned int br_cntr_width;
	int kernel_is_64_bit;

	int nr_cmdline;
	int nr_sibling_cores;
	int nr_sibling_dies;
	int nr_sibling_threads;
	int nr_numa_nodes;
	int nr_memory_nodes;
	int nr_pmu_mappings;
	int nr_groups;
	int nr_cpu_pmu_caps;
	int nr_hybrid_nodes;
	int nr_pmus_with_caps;
	char *cmdline;
	const char **cmdline_argv;
	char *sibling_cores;
	char *sibling_dies;
	char *sibling_threads;
	char *pmu_mappings;
	char **cpu_pmu_caps;
	struct cpu_topology_map *cpu;
	struct cpu_cache_level *caches;
	int caches_cnt;
	u32 comp_ratio;
	u32 comp_ver;
	u32 comp_type;
	u32 comp_level;
	u32 comp_mmap_len;
	struct numa_node *numa_nodes;
	struct memory_node *memory_nodes;
	unsigned long long memory_bsize;
	struct hybrid_node *hybrid_nodes;
	struct pmu_caps *pmu_caps;
#ifdef HAVE_LIBBPF_SUPPORT
	/*
	 * bpf_progs.lock protects the BPF rbtrees below. This is needed
	 * because the trees are accessed by different threads in perf-top.
	 */
	struct {
		struct rw_semaphore lock;
		struct rb_root infos;
		u32 infos_cnt;
		struct rb_root btfs;
		u32 btfs_cnt;
	} bpf_progs;
#endif // HAVE_LIBBPF_SUPPORT
	/* same reason as above (for perf-top) */
	struct {
		struct rw_semaphore lock;
		struct rb_root tree;
	} cgroups;

	/* For fast cpu to numa node lookup via perf_env__numa_node */
	int *numa_map;
	int nr_numa_map;

	/* For real clock time reference. */
	struct {
		u64 tod_ns;
		u64 clockid_ns;
		u64 clockid_res_ns;
		int clockid;
		/*
		 * enabled is only valid in report mode; it is set to true in
		 * process_clock_data() once the values above have been filled in.
		 */
		bool enabled;
	} clock;
	arch_syscalls__strerrno_t *arch_strerrno;
};

enum perf_compress_type {
	PERF_COMP_NONE = 0,
	PERF_COMP_ZSTD,
	PERF_COMP_MAX
};

struct bpf_prog_info_node;
struct btf_node;

extern struct perf_env perf_env;

void perf_env__exit(struct perf_env *env);

int perf_env__kernel_is_64_bit(struct perf_env *env);

int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[]);

int perf_env__read_cpuid(struct perf_env *env);
int perf_env__read_pmu_mappings(struct perf_env *env);
int perf_env__nr_pmu_mappings(struct perf_env *env);
const char *perf_env__pmu_mappings(struct perf_env *env);

int perf_env__read_cpu_topology_map(struct perf_env *env);

void cpu_cache_level__free(struct cpu_cache_level *cache);

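/*
 * perf_env__arch() returns the normalized architecture name (falling back
 * to the running machine when none was recorded), while perf_env__raw_arch()
 * returns the string as recorded in the header.
 */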
const char *perf_env__arch(struct perf_env *env);
const char *perf_env__arch_strerrno(struct perf_env *env, int err);
const char *perf_env__cpuid(struct perf_env *env);
const char *perf_env__raw_arch(struct perf_env *env);
int perf_env__nr_cpus_avail(struct perf_env *env);

void perf_env__init(struct perf_env *env);
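/*
 * The BPF program info and BTF nodes live in the rbtrees under
 * perf_env::bpf_progs. The double-underscore variants below appear to be
 * the unlocked forms: the caller is expected to hold bpf_progs.lock, e.g.
 * (illustrative sketch, not part of this header):
 *
 *	down_write(&env->bpf_progs.lock);
 *	__perf_env__insert_bpf_prog_info(env, info_node);
 *	up_write(&env->bpf_progs.lock);
 *
 * The plain-named variants take the lock themselves.
 */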
bool __perf_env__insert_bpf_prog_info(struct perf_env *env,
				      struct bpf_prog_info_node *info_node);
bool perf_env__insert_bpf_prog_info(struct perf_env *env,
				    struct bpf_prog_info_node *info_node);
struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
							 __u32 prog_id);
bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
bool __perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id);
struct btf_node *__perf_env__find_btf(struct perf_env *env, __u32 btf_id);

int perf_env__numa_node(struct perf_env *env, struct perf_cpu cpu);
char *perf_env__find_pmu_cap(struct perf_env *env, const char *pmu_name,
			     const char *cap);

bool perf_env__has_pmu_mapping(struct perf_env *env, const char *pmu_name);
void perf_env__find_br_cntr_info(struct perf_env *env,
				 unsigned int *nr,
				 unsigned int *width);

bool x86__is_amd_cpu(void);
bool perf_env__is_x86_amd_cpu(struct perf_env *env);
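
/*
 * Illustrative usage sketch (not part of this header), assuming an
 * already populated environment such as the global 'perf_env':
 *
 *	const char *arch = perf_env__arch(&perf_env);
 *	int node = perf_env__numa_node(&perf_env, (struct perf_cpu){ .cpu = 0 });
 *	char *cap = perf_env__find_pmu_cap(&perf_env, "cpu", "branches");
 */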

#endif /* __PERF_ENV_H */