tools/perf/util/thread.c (v5.4)
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include "dso.h"
#include "session.h"
#include "thread.h"
#include "thread-stack.h"
#include "debug.h"
#include "namespaces.h"
#include "comm.h"
#include "map.h"
#include "symbol.h"
#include "unwind.h"
#include "callchain.h"

#include <api/fs/fs.h>

int thread__init_map_groups(struct thread *thread, struct machine *machine)
{
	pid_t pid = thread->pid_;

	if (pid == thread->tid || pid == -1) {
		thread->mg = map_groups__new(machine);
	} else {
		struct thread *leader = __machine__findnew_thread(machine, pid, pid);

		if (leader) {
			thread->mg = map_groups__get(leader->mg);
			thread__put(leader);
		}
	}

	return thread->mg ? 0 : -1;
}

struct thread *thread__new(pid_t pid, pid_t tid)
{
	char *comm_str;
	struct comm *comm;
	struct thread *thread = zalloc(sizeof(*thread));

	if (thread != NULL) {
		thread->pid_ = pid;
		thread->tid = tid;
		thread->ppid = -1;
		thread->cpu = -1;
		INIT_LIST_HEAD(&thread->namespaces_list);
		INIT_LIST_HEAD(&thread->comm_list);
		init_rwsem(&thread->namespaces_lock);
		init_rwsem(&thread->comm_lock);

		comm_str = malloc(32);
		if (!comm_str)
			goto err_thread;

		snprintf(comm_str, 32, ":%d", tid);
		comm = comm__new(comm_str, 0, false);
		free(comm_str);
		if (!comm)
			goto err_thread;

		list_add(&comm->list, &thread->comm_list);
		refcount_set(&thread->refcnt, 1);
		RB_CLEAR_NODE(&thread->rb_node);
		/* Thread holds first ref to nsdata. */
		thread->nsinfo = nsinfo__new(pid);
		srccode_state_init(&thread->srccode_state);
	}

	return thread;

err_thread:
	free(thread);
	return NULL;
}

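thread__new() above is the usual zalloc-plus-goto-error constructor: allocate zeroed storage, initialise the embedded lists and locks, and unwind through a single error label if a later step fails. A condensed standalone sketch of the idiom on a hypothetical widget type (plain calloc() standing in for the tree's zalloc()):

#include <stdlib.h>
#include <string.h>

struct widget {
	char *name;
};

static struct widget *widget__new(const char *name)
{
	struct widget *w = calloc(1, sizeof(*w));	/* zeroed, like zalloc() */

	if (w == NULL)
		return NULL;

	w->name = strdup(name);
	if (w->name == NULL)
		goto err_widget;	/* one exit path releases partial state */

	return w;

err_widget:
	free(w);
	return NULL;
}
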
void thread__delete(struct thread *thread)
{
	struct namespaces *namespaces, *tmp_namespaces;
	struct comm *comm, *tmp_comm;

	BUG_ON(!RB_EMPTY_NODE(&thread->rb_node));

	thread_stack__free(thread);

	if (thread->mg) {
		map_groups__put(thread->mg);
		thread->mg = NULL;
	}
	down_write(&thread->namespaces_lock);
	list_for_each_entry_safe(namespaces, tmp_namespaces,
				 &thread->namespaces_list, list) {
		list_del_init(&namespaces->list);
		namespaces__free(namespaces);
	}
	up_write(&thread->namespaces_lock);

	down_write(&thread->comm_lock);
	list_for_each_entry_safe(comm, tmp_comm, &thread->comm_list, list) {
		list_del_init(&comm->list);
		comm__free(comm);
	}
	up_write(&thread->comm_lock);

	nsinfo__zput(thread->nsinfo);
	srccode_state_free(&thread->srccode_state);

	exit_rwsem(&thread->namespaces_lock);
	exit_rwsem(&thread->comm_lock);
	free(thread);
}

struct thread *thread__get(struct thread *thread)
{
	if (thread)
		refcount_inc(&thread->refcnt);
	return thread;
}

void thread__put(struct thread *thread)
{
	if (thread && refcount_dec_and_test(&thread->refcnt)) {
		/*
		 * Remove it from the dead threads list, as the last reference
		 * is gone, if it is in a dead threads list.
		 *
		 * We may not be there anymore if, say, the machine where it
		 * was stored was already deleted, so we already removed it
		 * from the dead threads and some other piece of code still
		 * keeps a reference.
		 *
		 * This is what 'perf sched' does and finally drops it in
		 * perf_sched__lat(), where it calls perf_sched__read_events(),
		 * which processes the events by creating a session and
		 * deleting it, which ends up destroying the list heads for
		 * the dead threads, but before it does that it removes all
		 * threads from it using list_del_init().
		 *
		 * So we need to check here if it is in a dead threads list and
		 * if so, remove it before finally deleting the thread, to
		 * avoid a use-after-free situation.
		 */
		if (!list_empty(&thread->node))
			list_del_init(&thread->node);
		thread__delete(thread);
	}
}

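thread__get() and thread__put() implement the standard get/put reference-counting contract: each holder takes a counted reference, and whoever drops the count to zero frees the object. A minimal standalone sketch of the pattern on a hypothetical object type (perf itself uses the saturating, thread-safe refcount_t rather than a bare counter):

#include <stdlib.h>

struct object {
	int refcnt;		/* stand-in for refcount_t in this sketch */
};

static struct object *object__get(struct object *o)
{
	if (o)
		o->refcnt++;	/* caller now owns one reference */
	return o;
}

static void object__put(struct object *o)
{
	if (o && --o->refcnt == 0)
		free(o);	/* last reference out destroys the object */
}
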
static struct namespaces *__thread__namespaces(const struct thread *thread)
{
	if (list_empty(&thread->namespaces_list))
		return NULL;

	return list_first_entry(&thread->namespaces_list, struct namespaces, list);
}

struct namespaces *thread__namespaces(struct thread *thread)
{
	struct namespaces *ns;

	down_read(&thread->namespaces_lock);
	ns = __thread__namespaces(thread);
	up_read(&thread->namespaces_lock);

	return ns;
}

static int __thread__set_namespaces(struct thread *thread, u64 timestamp,
				    struct perf_record_namespaces *event)
{
	struct namespaces *new, *curr = __thread__namespaces(thread);

	new = namespaces__new(event);
	if (!new)
		return -ENOMEM;

	list_add(&new->list, &thread->namespaces_list);

	if (timestamp && curr) {
		/*
		 * The setns syscall must have changed a few or all of the
		 * namespaces of this thread. Update the end time for the
		 * namespaces previously used.
		 */
		curr = list_next_entry(new, list);
		curr->end_time = timestamp;
	}

	return 0;
}

int thread__set_namespaces(struct thread *thread, u64 timestamp,
			   struct perf_record_namespaces *event)
{
	int ret;

	down_write(&thread->namespaces_lock);
	ret = __thread__set_namespaces(thread, timestamp, event);
	up_write(&thread->namespaces_lock);
	return ret;
}

struct comm *thread__comm(const struct thread *thread)
{
	if (list_empty(&thread->comm_list))
		return NULL;

	return list_first_entry(&thread->comm_list, struct comm, list);
}

struct comm *thread__exec_comm(const struct thread *thread)
{
	struct comm *comm, *last = NULL, *second_last = NULL;

	list_for_each_entry(comm, &thread->comm_list, list) {
		if (comm->exec)
			return comm;
		second_last = last;
		last = comm;
	}

	/*
	 * 'last' with no start time might be the parent's comm of a synthesized
	 * thread (created by processing a synthesized fork event). For a main
	 * thread, that is very probably wrong. Prefer a later comm to avoid
	 * that case.
	 */
	if (second_last && !last->start && thread->pid_ == thread->tid)
		return second_last;

	return last;
}

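Both the namespaces list and the comm list are kept newest-first: list_add() inserts at the head, so list_first_entry() in thread__comm() returns the most recent name, and the walk in thread__exec_comm() runs from newest to oldest (meaning 'last' above is actually the oldest comm). A tiny demonstration of that ordering, assuming the kernel-style list helpers from tools/include:

#include <stdio.h>
#include <linux/list.h>

struct item {
	int val;
	struct list_head node;
};

int main(void)
{
	LIST_HEAD(head);
	struct item a = { .val = 1 }, b = { .val = 2 };

	list_add(&a.node, &head);	/* list: a */
	list_add(&b.node, &head);	/* list: b, a -- new entries go first */

	printf("%d\n", list_first_entry(&head, struct item, node)->val);
	return 0;			/* prints 2, the newest entry */
}
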
static int ____thread__set_comm(struct thread *thread, const char *str,
				u64 timestamp, bool exec)
{
	struct comm *new, *curr = thread__comm(thread);

	/* Override the default :tid entry */
	if (!thread->comm_set) {
		int err = comm__override(curr, str, timestamp, exec);
		if (err)
			return err;
	} else {
		new = comm__new(str, timestamp, exec);
		if (!new)
			return -ENOMEM;
		list_add(&new->list, &thread->comm_list);

		if (exec)
			unwind__flush_access(thread->mg);
	}

	thread->comm_set = true;

	return 0;
}

int __thread__set_comm(struct thread *thread, const char *str, u64 timestamp,
		       bool exec)
{
	int ret;

	down_write(&thread->comm_lock);
	ret = ____thread__set_comm(thread, str, timestamp, exec);
	up_write(&thread->comm_lock);
	return ret;
}

int thread__set_comm_from_proc(struct thread *thread)
{
	char path[64];
	char *comm = NULL;
	size_t sz;
	int err = -1;

	if (!(snprintf(path, sizeof(path), "%d/task/%d/comm",
		       thread->pid_, thread->tid) >= (int)sizeof(path)) &&
	    procfs__read_str(path, &comm, &sz) == 0) {
		comm[sz - 1] = '\0';
		err = thread__set_comm(thread, comm, 0);
	}

	return err;
}

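thread__set_comm_from_proc() builds a path relative to procfs ("<pid>/task/<tid>/comm"); procfs__read_str() from the tools' <api/fs/fs.h> resolves it against the procfs mount point, typically /proc. A caller sketch that reads the current process's own comm the same way (assumes the perf tree's api library; error handling kept minimal):

#include <api/fs/fs.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	char path[64];
	char *buf = NULL;
	size_t sz;

	snprintf(path, sizeof(path), "%d/task/%d/comm", getpid(), getpid());
	if (procfs__read_str(path, &buf, &sz) == 0) {
		buf[sz - 1] = '\0';	/* strip the trailing newline, as above */
		printf("comm: %s\n", buf);
	}
	free(buf);
	return 0;
}
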
static const char *__thread__comm_str(const struct thread *thread)
{
	const struct comm *comm = thread__comm(thread);

	if (!comm)
		return NULL;

	return comm__str(comm);
}

const char *thread__comm_str(struct thread *thread)
{
	const char *str;

	down_read(&thread->comm_lock);
	str = __thread__comm_str(thread);
	up_read(&thread->comm_lock);

	return str;
}

/* CHECKME: it should probably return the max comm len from its comm list */
int thread__comm_len(struct thread *thread)
{
	if (!thread->comm_len) {
		const char *comm = thread__comm_str(thread);
		if (!comm)
			return 0;
		thread->comm_len = strlen(comm);
	}

	return thread->comm_len;
}

size_t thread__fprintf(struct thread *thread, FILE *fp)
{
	return fprintf(fp, "Thread %d %s\n", thread->tid, thread__comm_str(thread)) +
	       map_groups__fprintf(thread->mg, fp);
}

int thread__insert_map(struct thread *thread, struct map *map)
{
	int ret;

	ret = unwind__prepare_access(thread->mg, map, NULL);
	if (ret)
		return ret;

	map_groups__fixup_overlappings(thread->mg, map, stderr);
	map_groups__insert(thread->mg, map);

	return 0;
}

static int __thread__prepare_access(struct thread *thread)
{
	bool initialized = false;
	int err = 0;
	struct maps *maps = &thread->mg->maps;
	struct map *map;

	down_read(&maps->lock);

	for (map = maps__first(maps); map; map = map__next(map)) {
		err = unwind__prepare_access(thread->mg, map, &initialized);
		if (err || initialized)
			break;
	}

	up_read(&maps->lock);

	return err;
}

static int thread__prepare_access(struct thread *thread)
{
	int err = 0;

	if (dwarf_callchain_users)
		err = __thread__prepare_access(thread);

	return err;
}

static int thread__clone_map_groups(struct thread *thread,
				    struct thread *parent,
				    bool do_maps_clone)
{
	/* This is a new thread, so share the map groups for the process. */
	if (thread->pid_ == parent->pid_)
		return thread__prepare_access(thread);

	if (thread->mg == parent->mg) {
		pr_debug("broken map groups on thread %d/%d parent %d/%d\n",
			 thread->pid_, thread->tid, parent->pid_, parent->tid);
		return 0;
	}
	/* But this one is a new process, so copy the maps. */
	return do_maps_clone ? map_groups__clone(thread, parent->mg) : 0;
}

int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp, bool do_maps_clone)
{
	if (parent->comm_set) {
		const char *comm = thread__comm_str(parent);
		int err;
		if (!comm)
			return -ENOMEM;
		err = thread__set_comm(thread, comm, timestamp);
		if (err)
			return err;
	}

	thread->ppid = parent->tid;
	return thread__clone_map_groups(thread, parent, do_maps_clone);
}

void thread__find_cpumode_addr_location(struct thread *thread, u64 addr,
					struct addr_location *al)
{
	size_t i;
	const u8 cpumodes[] = {
		PERF_RECORD_MISC_USER,
		PERF_RECORD_MISC_KERNEL,
		PERF_RECORD_MISC_GUEST_USER,
		PERF_RECORD_MISC_GUEST_KERNEL
	};

	for (i = 0; i < ARRAY_SIZE(cpumodes); i++) {
		thread__find_symbol(thread, cpumodes[i], addr, al);
		if (al->map)
			break;
	}
}

struct thread *thread__main_thread(struct machine *machine, struct thread *thread)
{
	if (thread->pid_ == thread->tid)
		return thread__get(thread);

	if (thread->pid_ == -1)
		return NULL;

	return machine__find_thread(machine, thread->pid_, thread->pid_);
}

int thread__memcpy(struct thread *thread, struct machine *machine,
		   void *buf, u64 ip, int len, bool *is64bit)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	struct addr_location al;
	long offset;

	if (machine__kernel_ip(machine, ip))
		cpumode = PERF_RECORD_MISC_KERNEL;

	if (!thread__find_map(thread, cpumode, ip, &al) || !al.map->dso ||
	    al.map->dso->data.status == DSO_DATA_STATUS_ERROR ||
	    map__load(al.map) < 0)
		return -1;

	offset = al.map->map_ip(al.map, ip);
	if (is64bit)
		*is64bit = al.map->dso->is_64_bit;

	return dso__data_read_offset(al.map->dso, machine, offset, buf, len);
}
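The v6.8 version below is the same file after two perf refactors: struct map_groups became struct maps, and direct field access (thread->pid_, thread->mg) was replaced by generated accessor functions so the underlying struct can be wrapped for reference-count checking (the RC_CHK machinery). The accessor pattern itself, reduced to a hypothetical type:

struct point {
	int x;
};

/* Callers never touch the field directly, so the representation can be
 * swapped (e.g. for a leak-checking wrapper) without touching call sites. */
static inline int point__x(const struct point *p)
{
	return p->x;
}

static inline void point__set_x(struct point *p, int x)
{
	p->x = x;
}
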
tools/perf/util/thread.c (v6.8)
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include "dso.h"
#include "session.h"
#include "thread.h"
#include "thread-stack.h"
#include "debug.h"
#include "namespaces.h"
#include "comm.h"
#include "map.h"
#include "symbol.h"
#include "unwind.h"
#include "callchain.h"

#include <api/fs/fs.h>

int thread__init_maps(struct thread *thread, struct machine *machine)
{
	pid_t pid = thread__pid(thread);

	if (pid == thread__tid(thread) || pid == -1) {
		thread__set_maps(thread, maps__new(machine));
	} else {
		struct thread *leader = __machine__findnew_thread(machine, pid, pid);

		if (leader) {
			thread__set_maps(thread, maps__get(thread__maps(leader)));
			thread__put(leader);
		}
	}

	return thread__maps(thread) ? 0 : -1;
}

struct thread *thread__new(pid_t pid, pid_t tid)
{
	char *comm_str;
	struct comm *comm;
	RC_STRUCT(thread) *_thread = zalloc(sizeof(*_thread));
	struct thread *thread;

	if (ADD_RC_CHK(thread, _thread) != NULL) {
		thread__set_pid(thread, pid);
		thread__set_tid(thread, tid);
		thread__set_ppid(thread, -1);
		thread__set_cpu(thread, -1);
		thread__set_guest_cpu(thread, -1);
		thread__set_lbr_stitch_enable(thread, false);
		INIT_LIST_HEAD(thread__namespaces_list(thread));
		INIT_LIST_HEAD(thread__comm_list(thread));
		init_rwsem(thread__namespaces_lock(thread));
		init_rwsem(thread__comm_lock(thread));

		comm_str = malloc(32);
		if (!comm_str)
			goto err_thread;

		snprintf(comm_str, 32, ":%d", tid);
		comm = comm__new(comm_str, 0, false);
		free(comm_str);
		if (!comm)
			goto err_thread;

		list_add(&comm->list, thread__comm_list(thread));
		refcount_set(thread__refcnt(thread), 1);
		/* Thread holds first ref to nsdata. */
		RC_CHK_ACCESS(thread)->nsinfo = nsinfo__new(pid);
		srccode_state_init(thread__srccode_state(thread));
	}

	return thread;

err_thread:
	free(thread);
	return NULL;
}

static void (*thread__priv_destructor)(void *priv);

void thread__set_priv_destructor(void (*destructor)(void *priv))
{
	assert(thread__priv_destructor == NULL);

	thread__priv_destructor = destructor;
}

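thread__set_priv_destructor() lets a tool that attaches per-thread private data register, once, a hook that thread__delete() will invoke. A hypothetical caller sketch, assuming the perf-internal thread API (thread__set_priv() is the assumed pairing setter):

static void my_priv_destructor(void *priv)
{
	free(priv);	/* release whatever the tool stored via thread__set_priv() */
}

static void my_tool__init(void)
{
	/* Registering twice would trip the assert() above. */
	thread__set_priv_destructor(my_priv_destructor);
}
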
void thread__delete(struct thread *thread)
{
	struct namespaces *namespaces, *tmp_namespaces;
	struct comm *comm, *tmp_comm;

	thread_stack__free(thread);

	if (thread__maps(thread)) {
		maps__put(thread__maps(thread));
		thread__set_maps(thread, NULL);
	}
	down_write(thread__namespaces_lock(thread));
	list_for_each_entry_safe(namespaces, tmp_namespaces,
				 thread__namespaces_list(thread), list) {
		list_del_init(&namespaces->list);
		namespaces__free(namespaces);
	}
	up_write(thread__namespaces_lock(thread));

	down_write(thread__comm_lock(thread));
	list_for_each_entry_safe(comm, tmp_comm, thread__comm_list(thread), list) {
		list_del_init(&comm->list);
		comm__free(comm);
	}
	up_write(thread__comm_lock(thread));

	nsinfo__zput(RC_CHK_ACCESS(thread)->nsinfo);
	srccode_state_free(thread__srccode_state(thread));

	exit_rwsem(thread__namespaces_lock(thread));
	exit_rwsem(thread__comm_lock(thread));
	thread__free_stitch_list(thread);

	if (thread__priv_destructor)
		thread__priv_destructor(thread__priv(thread));

	RC_CHK_FREE(thread);
}

struct thread *thread__get(struct thread *thread)
{
	struct thread *result;

	if (RC_CHK_GET(result, thread))
		refcount_inc(thread__refcnt(thread));

	return result;
}

void thread__put(struct thread *thread)
{
	if (thread && refcount_dec_and_test(thread__refcnt(thread)))
		thread__delete(thread);
	else
		RC_CHK_PUT(thread);
}

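The RC_* macros come from tools/perf/util/rc_check.h. With reference-count checking enabled, each thread__get() hands back a distinct wrapper so a missing put can be detected; with checking disabled they reduce to plain pointer operations, roughly like this sketch (an approximation of the unchecked variants, not a verbatim copy of the header):

#define ADD_RC_CHK(result, obj)	(result = (obj), (obj))
#define RC_CHK_ACCESS(obj)	(obj)
#define RC_CHK_GET(result, obj)	ADD_RC_CHK(result, obj)
#define RC_CHK_PUT(obj)		do { } while (0)
#define RC_CHK_FREE(obj)	free(obj)
#define RC_CHK_EQUAL(a, b)	((a) == (b))
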
static struct namespaces *__thread__namespaces(struct thread *thread)
{
	if (list_empty(thread__namespaces_list(thread)))
		return NULL;

	return list_first_entry(thread__namespaces_list(thread), struct namespaces, list);
}

struct namespaces *thread__namespaces(struct thread *thread)
{
	struct namespaces *ns;

	down_read(thread__namespaces_lock(thread));
	ns = __thread__namespaces(thread);
	up_read(thread__namespaces_lock(thread));

	return ns;
}

static int __thread__set_namespaces(struct thread *thread, u64 timestamp,
				    struct perf_record_namespaces *event)
{
	struct namespaces *new, *curr = __thread__namespaces(thread);

	new = namespaces__new(event);
	if (!new)
		return -ENOMEM;

	list_add(&new->list, thread__namespaces_list(thread));

	if (timestamp && curr) {
		/*
		 * The setns syscall must have changed a few or all of the
		 * namespaces of this thread. Update the end time for the
		 * namespaces previously used.
		 */
		curr = list_next_entry(new, list);
		curr->end_time = timestamp;
	}

	return 0;
}

int thread__set_namespaces(struct thread *thread, u64 timestamp,
			   struct perf_record_namespaces *event)
{
	int ret;

	down_write(thread__namespaces_lock(thread));
	ret = __thread__set_namespaces(thread, timestamp, event);
	up_write(thread__namespaces_lock(thread));
	return ret;
}

struct comm *thread__comm(struct thread *thread)
{
	if (list_empty(thread__comm_list(thread)))
		return NULL;

	return list_first_entry(thread__comm_list(thread), struct comm, list);
}

struct comm *thread__exec_comm(struct thread *thread)
{
	struct comm *comm, *last = NULL, *second_last = NULL;

	list_for_each_entry(comm, thread__comm_list(thread), list) {
		if (comm->exec)
			return comm;
		second_last = last;
		last = comm;
	}

	/*
	 * 'last' with no start time might be the parent's comm of a synthesized
	 * thread (created by processing a synthesized fork event). For a main
	 * thread, that is very probably wrong. Prefer a later comm to avoid
	 * that case.
	 */
	if (second_last && !last->start && thread__pid(thread) == thread__tid(thread))
		return second_last;

	return last;
}

static int ____thread__set_comm(struct thread *thread, const char *str,
				u64 timestamp, bool exec)
{
	struct comm *new, *curr = thread__comm(thread);

	/* Override the default :tid entry */
	if (!thread__comm_set(thread)) {
		int err = comm__override(curr, str, timestamp, exec);
		if (err)
			return err;
	} else {
		new = comm__new(str, timestamp, exec);
		if (!new)
			return -ENOMEM;
		list_add(&new->list, thread__comm_list(thread));

		if (exec)
			unwind__flush_access(thread__maps(thread));
	}

	thread__set_comm_set(thread, true);

	return 0;
}

int __thread__set_comm(struct thread *thread, const char *str, u64 timestamp,
		       bool exec)
{
	int ret;

	down_write(thread__comm_lock(thread));
	ret = ____thread__set_comm(thread, str, timestamp, exec);
	up_write(thread__comm_lock(thread));
	return ret;
}

int thread__set_comm_from_proc(struct thread *thread)
{
	char path[64];
	char *comm = NULL;
	size_t sz;
	int err = -1;

	if (!(snprintf(path, sizeof(path), "%d/task/%d/comm",
		       thread__pid(thread), thread__tid(thread)) >= (int)sizeof(path)) &&
	    procfs__read_str(path, &comm, &sz) == 0) {
		comm[sz - 1] = '\0';
		err = thread__set_comm(thread, comm, 0);
	}

	return err;
}

static const char *__thread__comm_str(struct thread *thread)
{
	const struct comm *comm = thread__comm(thread);

	if (!comm)
		return NULL;

	return comm__str(comm);
}

const char *thread__comm_str(struct thread *thread)
{
	const char *str;

	down_read(thread__comm_lock(thread));
	str = __thread__comm_str(thread);
	up_read(thread__comm_lock(thread));

	return str;
}

static int __thread__comm_len(struct thread *thread, const char *comm)
{
	if (!comm)
		return 0;
	thread__set_comm_len(thread, strlen(comm));

	return thread__var_comm_len(thread);
}

/* CHECKME: it should probably return the max comm len from its comm list */
int thread__comm_len(struct thread *thread)
{
	int comm_len = thread__var_comm_len(thread);

	if (!comm_len) {
		const char *comm;

		down_read(thread__comm_lock(thread));
		comm = __thread__comm_str(thread);
		comm_len = __thread__comm_len(thread, comm);
		up_read(thread__comm_lock(thread));
	}

	return comm_len;
}

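Relative to v5.4, the cached length is now computed while the comm lock is still held, instead of after thread__comm_str() has already dropped it. The lazy-caching shape on its own, with a hypothetical type and the locking elided:

#include <string.h>

struct named {
	const char *name;
	int name_len;	/* 0 means "not computed yet" */
};

static int named__len(struct named *n)
{
	if (!n->name_len && n->name)
		n->name_len = (int)strlen(n->name);	/* compute once, cache */
	return n->name_len;
}
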
size_t thread__fprintf(struct thread *thread, FILE *fp)
{
	return fprintf(fp, "Thread %d %s\n", thread__tid(thread), thread__comm_str(thread)) +
	       maps__fprintf(thread__maps(thread), fp);
}

int thread__insert_map(struct thread *thread, struct map *map)
{
	int ret;

	ret = unwind__prepare_access(thread__maps(thread), map, NULL);
	if (ret)
		return ret;

	return maps__fixup_overlap_and_insert(thread__maps(thread), map);
}

struct thread__prepare_access_maps_cb_args {
	int err;
	struct maps *maps;
};

static int thread__prepare_access_maps_cb(struct map *map, void *data)
{
	bool initialized = false;
	struct thread__prepare_access_maps_cb_args *args = data;

	args->err = unwind__prepare_access(args->maps, map, &initialized);

	return (args->err || initialized) ? 1 : 0;
}

static int thread__prepare_access(struct thread *thread)
{
	struct thread__prepare_access_maps_cb_args args = {
		.err = 0,
	};

	if (dwarf_callchain_users) {
		args.maps = thread__maps(thread);
		maps__for_each_map(thread__maps(thread), thread__prepare_access_maps_cb, &args);
	}

	return args.err;
}

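maps__for_each_map() visits each map until the callback returns non-zero, which is how the code above stops as soon as one map either fails or fully initialises the unwinder; it replaces v5.4's open-coded maps__first()/map__next() loop and its explicit lock handling. A hypothetical callback in the same style (assumes the perf-internal maps API; __maybe_unused comes from the tools' <linux/compiler.h>):

struct count_args {
	unsigned int nr;
};

static int count_maps_cb(struct map *map __maybe_unused, void *data)
{
	struct count_args *args = data;

	args->nr++;
	return 0;	/* 0 keeps iterating; non-zero stops the walk */
}
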
static int thread__clone_maps(struct thread *thread, struct thread *parent, bool do_maps_clone)
{
	/* This is a new thread, so share the maps for the process. */
	if (thread__pid(thread) == thread__pid(parent))
		return thread__prepare_access(thread);

	if (RC_CHK_EQUAL(thread__maps(thread), thread__maps(parent))) {
		pr_debug("broken map groups on thread %d/%d parent %d/%d\n",
			 thread__pid(thread), thread__tid(thread),
			 thread__pid(parent), thread__tid(parent));
		return 0;
	}
	/* But this one is a new process, so copy the maps. */
	return do_maps_clone ? maps__copy_from(thread__maps(thread), thread__maps(parent)) : 0;
}

int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp, bool do_maps_clone)
{
	if (thread__comm_set(parent)) {
		const char *comm = thread__comm_str(parent);
		int err;
		if (!comm)
			return -ENOMEM;
		err = thread__set_comm(thread, comm, timestamp);
		if (err)
			return err;
	}

	thread__set_ppid(thread, thread__tid(parent));
	return thread__clone_maps(thread, parent, do_maps_clone);
}

void thread__find_cpumode_addr_location(struct thread *thread, u64 addr,
					struct addr_location *al)
{
	size_t i;
	const u8 cpumodes[] = {
		PERF_RECORD_MISC_USER,
		PERF_RECORD_MISC_KERNEL,
		PERF_RECORD_MISC_GUEST_USER,
		PERF_RECORD_MISC_GUEST_KERNEL
	};

	for (i = 0; i < ARRAY_SIZE(cpumodes); i++) {
		thread__find_symbol(thread, cpumodes[i], addr, al);
		if (al->map)
			break;
	}
}

struct thread *thread__main_thread(struct machine *machine, struct thread *thread)
{
	if (thread__pid(thread) == thread__tid(thread))
		return thread__get(thread);

	if (thread__pid(thread) == -1)
		return NULL;

	return machine__find_thread(machine, thread__pid(thread), thread__pid(thread));
}

int thread__memcpy(struct thread *thread, struct machine *machine,
		   void *buf, u64 ip, int len, bool *is64bit)
{
	u8 cpumode = PERF_RECORD_MISC_USER;
	struct addr_location al;
	struct dso *dso;
	long offset;

	if (machine__kernel_ip(machine, ip))
		cpumode = PERF_RECORD_MISC_KERNEL;

	addr_location__init(&al);
	if (!thread__find_map(thread, cpumode, ip, &al)) {
		addr_location__exit(&al);
		return -1;
	}

	dso = map__dso(al.map);

	if (!dso || dso->data.status == DSO_DATA_STATUS_ERROR || map__load(al.map) < 0) {
		addr_location__exit(&al);
		return -1;
	}

	offset = map__map_ip(al.map, ip);
	if (is64bit)
		*is64bit = dso->is_64_bit;

	addr_location__exit(&al);

	return dso__data_read_offset(dso, machine, offset, buf, len);
}

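In v6.8 an addr_location holds counted references, so every use is bracketed with addr_location__init()/addr_location__exit() on all paths, as thread__memcpy() now does. A sketch of the usage shape, assuming the perf-internal headers:

static int lookup_example(struct thread *thread, u64 ip, u8 cpumode)
{
	struct addr_location al;
	int ret = -1;

	addr_location__init(&al);	/* start clean, holding no references */
	if (thread__find_map(thread, cpumode, ip, &al))
		ret = 0;		/* al.map is only valid before the exit */
	addr_location__exit(&al);	/* drop whatever references were taken */
	return ret;
}
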
void thread__free_stitch_list(struct thread *thread)
{
	struct lbr_stitch *lbr_stitch = thread__lbr_stitch(thread);
	struct stitch_list *pos, *tmp;

	if (!lbr_stitch)
		return;

	list_for_each_entry_safe(pos, tmp, &lbr_stitch->lists, node) {
		list_del_init(&pos->node);
		free(pos);
	}

	list_for_each_entry_safe(pos, tmp, &lbr_stitch->free_lists, node) {
		list_del_init(&pos->node);
		free(pos);
	}

	zfree(&lbr_stitch->prev_lbr_cursor);
	free(thread__lbr_stitch(thread));
	thread__set_lbr_stitch(thread, NULL);
}
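
zfree() from <linux/zalloc.h>, used above for prev_lbr_cursor, frees through a pointer-to-pointer and clears it so the stale pointer cannot be reused. Roughly:

/* Approximate shape of zfree(); the real helper lives behind
 * <linux/zalloc.h> in the tools tree. */
static inline void zfree_sketch(void **pptr)
{
	free(*pptr);
	*pptr = NULL;	/* poison against use-after-free */
}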