#include "../perf.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include "session.h"
#include "thread.h"
#include "thread-stack.h"
#include "util.h"
#include "debug.h"
#include "comm.h"
#include "unwind.h"

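/*
 * Allocate or share map groups for @thread: a thread group leader
 * (pid == tid) or a thread with an unknown pid gets a fresh set of
 * map groups, while any other thread takes a reference on its group
 * leader's.
 */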
int thread__init_map_groups(struct thread *thread, struct machine *machine)
{
        struct thread *leader;
        pid_t pid = thread->pid_;

        if (pid == thread->tid || pid == -1) {
                thread->mg = map_groups__new(machine);
        } else {
                leader = __machine__findnew_thread(machine, pid, pid);
                if (leader) {
                        thread->mg = map_groups__get(leader->mg);
                        thread__put(leader);
                }
        }

        return thread->mg ? 0 : -1;
}

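/*
 * Allocate and initialize a new thread. Until a real COMM event is
 * seen, the thread carries a synthetic ":<tid>" comm entry. The
 * caller owns the initial reference (refcnt == 1).
 */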
struct thread *thread__new(pid_t pid, pid_t tid)
{
        char *comm_str;
        struct comm *comm;
        struct thread *thread = zalloc(sizeof(*thread));

        if (thread != NULL) {
                thread->pid_ = pid;
                thread->tid = tid;
                thread->ppid = -1;
                thread->cpu = -1;
                INIT_LIST_HEAD(&thread->comm_list);

                if (unwind__prepare_access(thread) < 0)
                        goto err_thread;

                comm_str = malloc(32);
                if (!comm_str)
                        goto err_thread;

                snprintf(comm_str, 32, ":%d", tid);
                comm = comm__new(comm_str, 0, false);
                free(comm_str);
                if (!comm)
                        goto err_thread;

                list_add(&comm->list, &thread->comm_list);
                atomic_set(&thread->refcnt, 1);
                RB_CLEAR_NODE(&thread->rb_node);
        }

        return thread;

err_thread:
        free(thread);
        return NULL;
}

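/*
 * Free a thread and everything it owns. The thread must already have
 * been unlinked from its machine's rb tree; drop references with
 * thread__put() instead of calling this directly.
 */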
void thread__delete(struct thread *thread)
{
        struct comm *comm, *tmp;

        BUG_ON(!RB_EMPTY_NODE(&thread->rb_node));

        thread_stack__free(thread);

        if (thread->mg) {
                map_groups__put(thread->mg);
                thread->mg = NULL;
        }
        list_for_each_entry_safe(comm, tmp, &thread->comm_list, list) {
                list_del(&comm->list);
                comm__free(comm);
        }
        unwind__finish_access(thread);

        free(thread);
}

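/*
 * Reference counting: thread__get() takes a reference, thread__put()
 * drops one and deletes the thread when the last reference is gone.
 * A sketch of the usual lookup pattern (assuming, as with
 * __machine__findnew_thread() above, that the findnew routine returns
 * a reference the caller must drop):
 *
 *        struct thread *t = machine__findnew_thread(machine, pid, tid);
 *
 *        if (t != NULL) {
 *                ... use t ...
 *                thread__put(t);
 *        }
 */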
struct thread *thread__get(struct thread *thread)
{
        if (thread)
                atomic_inc(&thread->refcnt);
        return thread;
}

void thread__put(struct thread *thread)
{
        if (thread && atomic_dec_and_test(&thread->refcnt)) {
                /*
                 * Remove it from the dead_threads list, as the last
                 * reference is gone.
                 */
                list_del_init(&thread->node);
                thread__delete(thread);
        }
}

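/* The current comm is the most recently added entry, at the list head. */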
struct comm *thread__comm(const struct thread *thread)
{
        if (list_empty(&thread->comm_list))
                return NULL;

        return list_first_entry(&thread->comm_list, struct comm, list);
}

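/*
 * Return the most recent comm that came from an exec; if no entry is
 * flagged as exec, fall back to the oldest comm on the list.
 */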
struct comm *thread__exec_comm(const struct thread *thread)
{
        struct comm *comm, *last = NULL;

        list_for_each_entry(comm, &thread->comm_list, list) {
                if (comm->exec)
                        return comm;
                last = comm;
        }

        return last;
}

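/*
 * Set the thread's comm as of @timestamp. The first call overrides the
 * default ":<tid>" entry in place; later calls prepend a new entry so
 * the history remains available for time-ordered lookups. An exec also
 * flushes any cached unwind access state.
 */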
int __thread__set_comm(struct thread *thread, const char *str, u64 timestamp,
                       bool exec)
{
        struct comm *new, *curr = thread__comm(thread);
        int err;

        /* Override the default :tid entry */
        if (!thread->comm_set) {
                err = comm__override(curr, str, timestamp, exec);
                if (err)
                        return err;
        } else {
                new = comm__new(str, timestamp, exec);
                if (!new)
                        return -ENOMEM;
                list_add(&new->list, &thread->comm_list);

                if (exec)
                        unwind__flush_access(thread);
        }

        thread->comm_set = true;

        return 0;
}

const char *thread__comm_str(const struct thread *thread)
{
        const struct comm *comm = thread__comm(thread);

        if (!comm)
                return NULL;

        return comm__str(comm);
}

/* CHECKME: it should probably return the max comm len from its comm list instead */
int thread__comm_len(struct thread *thread)
{
        if (!thread->comm_len) {
                const char *comm = thread__comm_str(thread);

                if (!comm)
                        return 0;
                thread->comm_len = strlen(comm);
        }

        return thread->comm_len;
}

size_t thread__fprintf(struct thread *thread, FILE *fp)
{
        return fprintf(fp, "Thread %d %s\n", thread->tid, thread__comm_str(thread)) +
               map_groups__fprintf(thread->mg, fp);
}

void thread__insert_map(struct thread *thread, struct map *map)
{
        map_groups__fixup_overlappings(thread->mg, map, stderr);
        map_groups__insert(thread->mg, map);
}

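/*
 * Called at fork time to set up the child's map groups from the
 * parent's; returns -ENOMEM if copying the maps fails.
 */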
static int thread__clone_map_groups(struct thread *thread,
                                    struct thread *parent)
{
        int i;

        /* This is a new thread, so it shares the map groups of its process. */
        if (thread->pid_ == parent->pid_)
                return 0;

        if (thread->mg == parent->mg) {
                pr_debug("broken map groups on thread %d/%d parent %d/%d\n",
                         thread->pid_, thread->tid, parent->pid_, parent->tid);
                return 0;
        }

        /* But this one is a new process, so copy the maps. */
        for (i = 0; i < MAP__NR_TYPES; ++i)
                if (map_groups__clone(thread->mg, parent->mg, i) < 0)
                        return -ENOMEM;

        return 0;
}

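/*
 * Set up a forked child: inherit the parent's comm (when it is known),
 * record the parent's tid, and share or copy the parent's map groups.
 */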
int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp)
{
        int err;

        if (parent->comm_set) {
                const char *comm = thread__comm_str(parent);

                if (!comm)
                        return -ENOMEM;
                err = thread__set_comm(thread, comm, timestamp);
                if (err)
                        return err;
        }

        thread->ppid = parent->tid;
        return thread__clone_map_groups(thread, parent);
}

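/*
 * Resolve @addr by trying each cpumode in turn (user, kernel, then
 * guest user/kernel), stopping at the first one that yields a map.
 */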
void thread__find_cpumode_addr_location(struct thread *thread,
                                        enum map_type type, u64 addr,
                                        struct addr_location *al)
{
        size_t i;
        const u8 cpumodes[] = {
                PERF_RECORD_MISC_USER,
                PERF_RECORD_MISC_KERNEL,
                PERF_RECORD_MISC_GUEST_USER,
                PERF_RECORD_MISC_GUEST_KERNEL
        };

        for (i = 0; i < ARRAY_SIZE(cpumodes); i++) {
                thread__find_addr_location(thread, cpumodes[i], type, addr, al);
                if (al->map)
                        break;
        }
}