/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __PERF_ENV_H
#define __PERF_ENV_H

#include <linux/types.h>
#include <linux/rbtree.h>
#include "cpumap.h"
#include "rwsem.h"

struct perf_cpu_map;

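/*
 * Per-CPU topology identifiers, as exposed under
 * /sys/devices/system/cpu/cpuN/topology/: which socket, die and core a
 * logical CPU belongs to.
 */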
struct cpu_topology_map {
	int socket_id;
	int die_id;
	int core_id;
};

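/*
 * One cache level of one CPU, read from
 * /sys/devices/system/cpu/cpuN/cache/indexM/: the numeric geometry plus
 * the sysfs strings for the cache type ("Data", "Instruction", ...), the
 * human-readable size and the set of CPUs sharing it.
 */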
struct cpu_cache_level {
	u32 level;
	u32 line_size;
	u32 sets;
	u32 ways;
	char *type;
	char *size;
	char *map;
};

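/*
 * Memory total/free counters and the CPUs attached to one NUMA node
 * (HEADER_NUMA_TOPOLOGY).
 */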
struct numa_node {
	u32 node;
	u64 mem_total;
	u64 mem_free;
	struct perf_cpu_map *map;
};

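/*
 * One physical memory node (HEADER_MEM_TOPOLOGY): the node id plus a
 * bitmap ('set', 'size' bits) of which memory blocks are present; the
 * block size itself is kept in perf_env::memory_bsize.
 */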
struct memory_node {
	u64 node;
	u64 size;
	unsigned long *set;
};

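/*
 * One hybrid core PMU and the cpu list it covers, e.g. cpu_core/cpu_atom
 * on hybrid x86 parts (HEADER_HYBRID_TOPOLOGY).
 */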
struct hybrid_node {
	char *pmu_name;
	char *cpus;
};

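/*
 * Capabilities advertised by one PMU in its sysfs caps/ directory, stored
 * as "name=value" strings, together with a few values parsed out of them
 * up front (maximum branch record depth, branch counter geometry).
 */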
struct pmu_caps {
	int nr_caps;
	unsigned int max_branches;
	unsigned int br_cntr_nr;
	unsigned int br_cntr_width;

	char **caps;
	char *pmu_name;
};

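/*
 * Arch-specific hook for turning a syscall error number into its symbolic
 * name (e.g. "ENOENT"); the right implementation is looked up from the
 * recorded arch string.
 */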
typedef const char *(arch_syscalls__strerrno_t)(int err);

arch_syscalls__strerrno_t *arch_syscalls__strerrno_function(const char *arch);

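/*
 * Snapshot of the environment perf data was recorded in (or of the running
 * system): most fields mirror the feature sections of a perf.data header,
 * such as hostname, CPU/NUMA/cache topology, PMU mappings and event
 * compression parameters, and are filled in either at record time or when
 * the header is parsed.
 */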
struct perf_env {
	char *hostname;
	char *os_release;
	char *version;
	char *arch;
	int nr_cpus_online;
	int nr_cpus_avail;
	char *cpu_desc;
	char *cpuid;
	unsigned long long total_mem;
	unsigned int msr_pmu_type;
	unsigned int max_branches;
	unsigned int br_cntr_nr;
	unsigned int br_cntr_width;
	int kernel_is_64_bit;

	int nr_cmdline;
	int nr_sibling_cores;
	int nr_sibling_dies;
	int nr_sibling_threads;
	int nr_numa_nodes;
	int nr_memory_nodes;
	int nr_pmu_mappings;
	int nr_groups;
	int nr_cpu_pmu_caps;
	int nr_hybrid_nodes;
	int nr_pmus_with_caps;
	char *cmdline;
	const char **cmdline_argv;
	char *sibling_cores;
	char *sibling_dies;
	char *sibling_threads;
	char *pmu_mappings;
	char **cpu_pmu_caps;
	struct cpu_topology_map *cpu;
	struct cpu_cache_level *caches;
	int caches_cnt;
	u32 comp_ratio;
	u32 comp_ver;
	u32 comp_type;
	u32 comp_level;
	u32 comp_mmap_len;
	struct numa_node *numa_nodes;
	struct memory_node *memory_nodes;
	unsigned long long memory_bsize;
	struct hybrid_node *hybrid_nodes;
	struct pmu_caps *pmu_caps;
#ifdef HAVE_LIBBPF_SUPPORT
	/*
	 * bpf_info_lock protects bpf rbtrees. This is needed because the
	 * trees are accessed by different threads in perf-top
	 */
	struct {
		struct rw_semaphore lock;
		struct rb_root infos;
		u32 infos_cnt;
		struct rb_root btfs;
		u32 btfs_cnt;
	} bpf_progs;
#endif // HAVE_LIBBPF_SUPPORT
	/* same reason as above (for perf-top) */
	struct {
		struct rw_semaphore lock;
		struct rb_root tree;
	} cgroups;

	/* For fast cpu to numa node lookup via perf_env__numa_node */
	int *numa_map;
	int nr_numa_map;

	/* For real clock time reference. */
	struct {
		u64 tod_ns;
		u64 clockid_ns;
		u64 clockid_res_ns;
		int clockid;
		/*
		 * enabled is valid for report mode, and is true if above
		 * values are set, it's set in process_clock_data
		 */
		bool enabled;
	} clock;
	arch_syscalls__strerrno_t *arch_strerrno;
};

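/* Compression used for compressed records in a perf.data file (currently Zstd only). */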
enum perf_compress_type {
	PERF_COMP_NONE = 0,
	PERF_COMP_ZSTD,
	PERF_COMP_MAX
};

struct bpf_prog_info_node;
struct btf_node;

extern struct perf_env perf_env;

void perf_env__exit(struct perf_env *env);

int perf_env__kernel_is_64_bit(struct perf_env *env);

int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[]);

int perf_env__read_cpuid(struct perf_env *env);
int perf_env__read_pmu_mappings(struct perf_env *env);
int perf_env__nr_pmu_mappings(struct perf_env *env);
const char *perf_env__pmu_mappings(struct perf_env *env);

int perf_env__read_cpu_topology_map(struct perf_env *env);

void cpu_cache_level__free(struct cpu_cache_level *cache);

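/*
 * The accessors below prefer the value recorded in the header and fall
 * back to probing the running system when it is missing, so readers should
 * go through them rather than the raw fields.  A rough usage sketch (not
 * part of this header):
 *
 *	const char *arch = perf_env__arch(env);
 *	int nr_cpus = perf_env__nr_cpus_avail(env);
 */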
const char *perf_env__arch(struct perf_env *env);
const char *perf_env__arch_strerrno(struct perf_env *env, int err);
const char *perf_env__cpuid(struct perf_env *env);
const char *perf_env__raw_arch(struct perf_env *env);
int perf_env__nr_cpus_avail(struct perf_env *env);

void perf_env__init(struct perf_env *env);
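/*
 * The __perf_env__* variants below expect bpf_progs.lock to already be
 * held by the caller; the plain perf_env__* wrappers take it themselves.
 */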
void __perf_env__insert_bpf_prog_info(struct perf_env *env,
				      struct bpf_prog_info_node *info_node);
void perf_env__insert_bpf_prog_info(struct perf_env *env,
				    struct bpf_prog_info_node *info_node);
struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
							 __u32 prog_id);
bool perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
bool __perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id);
struct btf_node *__perf_env__find_btf(struct perf_env *env, __u32 btf_id);

int perf_env__numa_node(struct perf_env *env, struct perf_cpu cpu);
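/*
 * Look up a capability recorded for a named PMU.  A rough sketch (not part
 * of this header, PMU and capability names purely illustrative):
 *
 *	char *cap = perf_env__find_pmu_cap(env, "cpu", "branches");
 *	if (cap)
 *		... use the matching capability entry ...
 */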
char *perf_env__find_pmu_cap(struct perf_env *env, const char *pmu_name,
			     const char *cap);

bool perf_env__has_pmu_mapping(struct perf_env *env, const char *pmu_name);
#endif /* __PERF_ENV_H */