// SPDX-License-Identifier: GPL-2.0-only
/*
 * thread-stack.c: Synthesize a thread's stack using call / return events
 * Copyright (c) 2014, Intel Corporation.
 */

#include <linux/rbtree.h>
#include <linux/list.h>
#include <linux/log2.h>
#include <linux/zalloc.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include "thread.h"
#include "event.h"
#include "machine.h"
#include "env.h"
#include "debug.h"
#include "symbol.h"
#include "comm.h"
#include "call-path.h"
#include "thread-stack.h"

#define STACK_GROWTH 2048

/*
 * State of retpoline detection.
 *
 * RETPOLINE_NONE: no retpoline detection
 * X86_RETPOLINE_POSSIBLE: x86 retpoline possible
 * X86_RETPOLINE_DETECTED: x86 retpoline detected
 */
enum retpoline_state_t {
	RETPOLINE_NONE,
	X86_RETPOLINE_POSSIBLE,
	X86_RETPOLINE_DETECTED,
};

/**
 * struct thread_stack_entry - thread stack entry.
 * @ret_addr: return address
 * @timestamp: timestamp (if known)
 * @ref: external reference (e.g. db_id of sample)
 * @branch_count: the branch count when the entry was created
 * @insn_count: the instruction count when the entry was created
 * @cyc_count: the cycle count when the entry was created
 * @db_id: id used for db-export
 * @cp: call path
 * @no_call: a 'call' was not seen
 * @trace_end: a 'call' but trace ended
 * @non_call: a branch but not a 'call' to the start of a different symbol
 */
struct thread_stack_entry {
	u64 ret_addr;
	u64 timestamp;
	u64 ref;
	u64 branch_count;
	u64 insn_count;
	u64 cyc_count;
	u64 db_id;
	struct call_path *cp;
	bool no_call;
	bool trace_end;
	bool non_call;
};

/**
 * struct thread_stack - thread stack constructed from 'call' and 'return'
 *                       branch samples.
 * @stack: array that holds the stack
 * @cnt: number of entries in the stack
 * @sz: current maximum stack size
 * @trace_nr: current trace number
 * @branch_count: running branch count
 * @insn_count: running instruction count
 * @cyc_count: running cycle count
 * @kernel_start: kernel start address
 * @last_time: last timestamp
 * @crp: call/return processor
 * @comm: current comm
 * @arr_sz: size of array if this is the first element of an array
 * @rstate: used to detect retpolines
 */
struct thread_stack {
	struct thread_stack_entry *stack;
	size_t cnt;
	size_t sz;
	u64 trace_nr;
	u64 branch_count;
	u64 insn_count;
	u64 cyc_count;
	u64 kernel_start;
	u64 last_time;
	struct call_return_processor *crp;
	struct comm *comm;
	unsigned int arr_sz;
	enum retpoline_state_t rstate;
};

/*
 * Assume pid == tid == 0 identifies the idle task as defined by
 * perf_session__register_idle_thread(). The idle task is really 1 task per cpu,
 * and therefore requires a stack for each cpu.
 */
static inline bool thread_stack__per_cpu(struct thread *thread)
{
	return !(thread->tid || thread->pid_);
}

static int thread_stack__grow(struct thread_stack *ts)
{
	struct thread_stack_entry *new_stack;
	size_t sz, new_sz;

	new_sz = ts->sz + STACK_GROWTH;
	sz = new_sz * sizeof(struct thread_stack_entry);

	new_stack = realloc(ts->stack, sz);
	if (!new_stack)
		return -ENOMEM;

	ts->stack = new_stack;
	ts->sz = new_sz;

	return 0;
}

static int thread_stack__init(struct thread_stack *ts, struct thread *thread,
			      struct call_return_processor *crp)
{
	int err;

	err = thread_stack__grow(ts);
	if (err)
		return err;

	if (thread->mg && thread->mg->machine) {
		struct machine *machine = thread->mg->machine;
		const char *arch = perf_env__arch(machine->env);

		ts->kernel_start = machine__kernel_start(machine);
		if (!strcmp(arch, "x86"))
			ts->rstate = X86_RETPOLINE_POSSIBLE;
	} else {
		ts->kernel_start = 1ULL << 63;
	}
	ts->crp = crp;

	return 0;
}

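/*
 * Find or allocate the thread stack to use for 'cpu'. For the idle task the
 * stacks are kept in a per-cpu array, which is grown to a power of two of at
 * least cpu + 1 entries when a larger cpu number is seen; for ordinary tasks a
 * single-entry array is used. The selected entry is initialized lazily on
 * first use.
 */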
static struct thread_stack *thread_stack__new(struct thread *thread, int cpu,
					      struct call_return_processor *crp)
{
	struct thread_stack *ts = thread->ts, *new_ts;
	unsigned int old_sz = ts ? ts->arr_sz : 0;
	unsigned int new_sz = 1;

	if (thread_stack__per_cpu(thread) && cpu > 0)
		new_sz = roundup_pow_of_two(cpu + 1);

	if (!ts || new_sz > old_sz) {
		new_ts = calloc(new_sz, sizeof(*ts));
		if (!new_ts)
			return NULL;
		if (ts)
			memcpy(new_ts, ts, old_sz * sizeof(*ts));
		new_ts->arr_sz = new_sz;
		zfree(&thread->ts);
		thread->ts = new_ts;
		ts = new_ts;
	}

	if (thread_stack__per_cpu(thread) && cpu > 0 &&
	    (unsigned int)cpu < ts->arr_sz)
		ts += cpu;

	if (!ts->stack &&
	    thread_stack__init(ts, thread, crp))
		return NULL;

	return ts;
}

static struct thread_stack *thread__cpu_stack(struct thread *thread, int cpu)
{
	struct thread_stack *ts = thread->ts;

	if (cpu < 0)
		cpu = 0;

	if (!ts || (unsigned int)cpu >= ts->arr_sz)
		return NULL;

	ts += cpu;

	if (!ts->stack)
		return NULL;

	return ts;
}

static inline struct thread_stack *thread__stack(struct thread *thread,
						 int cpu)
{
	if (!thread)
		return NULL;

	if (thread_stack__per_cpu(thread))
		return thread__cpu_stack(thread, cpu);

	return thread->ts;
}

static int thread_stack__push(struct thread_stack *ts, u64 ret_addr,
			      bool trace_end)
{
	int err = 0;

	if (ts->cnt == ts->sz) {
		err = thread_stack__grow(ts);
		if (err) {
			pr_warning("Out of memory: discarding thread stack\n");
			ts->cnt = 0;
		}
	}

	ts->stack[ts->cnt].trace_end = trace_end;
	ts->stack[ts->cnt++].ret_addr = ret_addr;

	return err;
}

static void thread_stack__pop(struct thread_stack *ts, u64 ret_addr)
{
	size_t i;

	/*
	 * In some cases there may be functions which are not seen to return.
	 * For example when setjmp / longjmp has been used. Or the perf context
	 * switch in the kernel which doesn't stop and start tracing in exactly
	 * the same code path. When that happens the return address will be
	 * further down the stack. If the return address is not found at all,
	 * we assume the opposite (i.e. this is a return for a call that wasn't
	 * seen for some reason) and leave the stack alone.
	 */
	for (i = ts->cnt; i; ) {
		if (ts->stack[--i].ret_addr == ret_addr) {
			ts->cnt = i;
			return;
		}
	}
}

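/*
 * Pop 'trace end' entries off the top of the stack, stopping at the first
 * entry that is not a 'trace end'.
 */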
static void thread_stack__pop_trace_end(struct thread_stack *ts)
{
	size_t i;

	for (i = ts->cnt; i; ) {
		if (ts->stack[--i].trace_end)
			ts->cnt = i;
		else
			return;
	}
}

static bool thread_stack__in_kernel(struct thread_stack *ts)
{
	if (!ts->cnt)
		return false;

	return ts->stack[ts->cnt - 1].cp->in_kernel;
}

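/*
 * Convert stack entry 'idx' into a struct call_return and pass it to the
 * call/return processor's callback. The branch, instruction and cycle counts
 * reported are the amounts accumulated since the entry was pushed. A pointer
 * to the parent entry's db_id is passed along so that db-export can record the
 * parent / child relationship before the parent itself is exported.
 */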
static int thread_stack__call_return(struct thread *thread,
				     struct thread_stack *ts, size_t idx,
				     u64 timestamp, u64 ref, bool no_return)
{
	struct call_return_processor *crp = ts->crp;
	struct thread_stack_entry *tse;
	struct call_return cr = {
		.thread = thread,
		.comm = ts->comm,
		.db_id = 0,
	};
	u64 *parent_db_id;

	tse = &ts->stack[idx];
	cr.cp = tse->cp;
	cr.call_time = tse->timestamp;
	cr.return_time = timestamp;
	cr.branch_count = ts->branch_count - tse->branch_count;
	cr.insn_count = ts->insn_count - tse->insn_count;
	cr.cyc_count = ts->cyc_count - tse->cyc_count;
	cr.db_id = tse->db_id;
	cr.call_ref = tse->ref;
	cr.return_ref = ref;
	if (tse->no_call)
		cr.flags |= CALL_RETURN_NO_CALL;
	if (no_return)
		cr.flags |= CALL_RETURN_NO_RETURN;
	if (tse->non_call)
		cr.flags |= CALL_RETURN_NON_CALL;

	/*
	 * The parent db_id must be assigned before exporting the child. Note
	 * it is not possible to export the parent first because its information
	 * is not yet complete because its 'return' has not yet been processed.
	 */
	parent_db_id = idx ? &(tse - 1)->db_id : NULL;

	return crp->process(&cr, parent_db_id, crp->data);
}

static int __thread_stack__flush(struct thread *thread, struct thread_stack *ts)
{
	struct call_return_processor *crp = ts->crp;
	int err;

	if (!crp) {
		ts->cnt = 0;
		return 0;
	}

	while (ts->cnt) {
		err = thread_stack__call_return(thread, ts, --ts->cnt,
						ts->last_time, 0, true);
		if (err) {
			pr_err("Error flushing thread stack!\n");
			ts->cnt = 0;
			return err;
		}
	}

	return 0;
}

int thread_stack__flush(struct thread *thread)
{
	struct thread_stack *ts = thread->ts;
	unsigned int pos;
	int err = 0;

	if (ts) {
		for (pos = 0; pos < ts->arr_sz; pos++) {
			int ret = __thread_stack__flush(thread, ts + pos);

			if (ret)
				err = ret;
		}
	}

	return err;
}

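/*
 * Update the thread stack from one branch event. 'flags' are PERF_IP_FLAG_*
 * bits describing the branch (call, return, trace begin / end), and 'trace_nr'
 * identifies the current chunk of trace so that a discontinuity flushes the
 * stale stack. This interface only keeps return addresses, for synthesizing
 * callchains via thread_stack__sample(); the stack is not updated here once a
 * call/return processor is in use. An illustrative call, assuming the usual
 * mapping of branch samples (from = ip, to = addr), might be:
 *
 *	thread_stack__event(thread, sample->cpu, sample->flags, sample->ip,
 *			    sample->addr, sample->insn_len, trace_nr);
 */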
int thread_stack__event(struct thread *thread, int cpu, u32 flags, u64 from_ip,
			u64 to_ip, u16 insn_len, u64 trace_nr)
{
	struct thread_stack *ts = thread__stack(thread, cpu);

	if (!thread)
		return -EINVAL;

	if (!ts) {
		ts = thread_stack__new(thread, cpu, NULL);
		if (!ts) {
			pr_warning("Out of memory: no thread stack\n");
			return -ENOMEM;
		}
		ts->trace_nr = trace_nr;
	}

	/*
	 * When the trace is discontinuous, the trace_nr changes. In that case
	 * the stack might be completely invalid. Better to report nothing than
	 * to report something misleading, so flush the stack.
	 */
	if (trace_nr != ts->trace_nr) {
		if (ts->trace_nr)
			__thread_stack__flush(thread, ts);
		ts->trace_nr = trace_nr;
	}

	/* Stop here if thread_stack__process() is in use */
	if (ts->crp)
		return 0;

	if (flags & PERF_IP_FLAG_CALL) {
		u64 ret_addr;

		if (!to_ip)
			return 0;
		ret_addr = from_ip + insn_len;
		if (ret_addr == to_ip)
			return 0; /* Zero-length calls are excluded */
		return thread_stack__push(ts, ret_addr,
					  flags & PERF_IP_FLAG_TRACE_END);
	} else if (flags & PERF_IP_FLAG_TRACE_BEGIN) {
		/*
		 * If the caller did not change the trace number (which would
		 * have flushed the stack) then try to make sense of the stack.
		 * Possibly, tracing began after returning to the current
		 * address, so try to pop that. Also, do not expect a call made
		 * when the trace ended, to return, so pop that.
		 */
		thread_stack__pop(ts, to_ip);
		thread_stack__pop_trace_end(ts);
	} else if ((flags & PERF_IP_FLAG_RETURN) && from_ip) {
		thread_stack__pop(ts, to_ip);
	}

	return 0;
}

void thread_stack__set_trace_nr(struct thread *thread, int cpu, u64 trace_nr)
{
	struct thread_stack *ts = thread__stack(thread, cpu);

	if (!ts)
		return;

	if (trace_nr != ts->trace_nr) {
		if (ts->trace_nr)
			__thread_stack__flush(thread, ts);
		ts->trace_nr = trace_nr;
	}
}

static void __thread_stack__free(struct thread *thread, struct thread_stack *ts)
{
	__thread_stack__flush(thread, ts);
	zfree(&ts->stack);
}

static void thread_stack__reset(struct thread *thread, struct thread_stack *ts)
{
	unsigned int arr_sz = ts->arr_sz;

	__thread_stack__free(thread, ts);
	memset(ts, 0, sizeof(*ts));
	ts->arr_sz = arr_sz;
}

void thread_stack__free(struct thread *thread)
{
	struct thread_stack *ts = thread->ts;
	unsigned int pos;

	if (ts) {
		for (pos = 0; pos < ts->arr_sz; pos++)
			__thread_stack__free(thread, ts + pos);
		zfree(&thread->ts);
	}
}

static inline u64 callchain_context(u64 ip, u64 kernel_start)
{
	return ip < kernel_start ? PERF_CONTEXT_USER : PERF_CONTEXT_KERNEL;
}

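/*
 * Synthesize a callchain from the thread stack: a context marker
 * (PERF_CONTEXT_USER or PERF_CONTEXT_KERNEL), the sampled ip, then the saved
 * return addresses from most recent to oldest, with a new context marker
 * inserted whenever the addresses cross the user / kernel boundary.
 */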
void thread_stack__sample(struct thread *thread, int cpu,
			  struct ip_callchain *chain,
			  size_t sz, u64 ip, u64 kernel_start)
{
	struct thread_stack *ts = thread__stack(thread, cpu);
	u64 context = callchain_context(ip, kernel_start);
	u64 last_context;
	size_t i, j;

	if (sz < 2) {
		chain->nr = 0;
		return;
	}

	chain->ips[0] = context;
	chain->ips[1] = ip;

	if (!ts) {
		chain->nr = 2;
		return;
	}

	last_context = context;

	for (i = 2, j = 1; i < sz && j <= ts->cnt; i++, j++) {
		ip = ts->stack[ts->cnt - j].ret_addr;
		context = callchain_context(ip, kernel_start);
		if (context != last_context) {
			if (i >= sz - 1)
				break;
			chain->ips[i++] = context;
			last_context = context;
		}
		chain->ips[i] = ip;
	}

	chain->nr = i;
}

struct call_return_processor *
call_return_processor__new(int (*process)(struct call_return *cr, u64 *parent_db_id, void *data),
			   void *data)
{
	struct call_return_processor *crp;

	crp = zalloc(sizeof(struct call_return_processor));
	if (!crp)
		return NULL;
	crp->cpr = call_path_root__new();
	if (!crp->cpr)
		goto out_free;
	crp->process = process;
	crp->data = data;
	return crp;

out_free:
	free(crp);
	return NULL;
}

void call_return_processor__free(struct call_return_processor *crp)
{
	if (crp) {
		call_path_root__free(crp->cpr);
		free(crp);
	}
}

static int thread_stack__push_cp(struct thread_stack *ts, u64 ret_addr,
				 u64 timestamp, u64 ref, struct call_path *cp,
				 bool no_call, bool trace_end)
{
	struct thread_stack_entry *tse;
	int err;

	if (!cp)
		return -ENOMEM;

	if (ts->cnt == ts->sz) {
		err = thread_stack__grow(ts);
		if (err)
			return err;
	}

	tse = &ts->stack[ts->cnt++];
	tse->ret_addr = ret_addr;
	tse->timestamp = timestamp;
	tse->ref = ref;
	tse->branch_count = ts->branch_count;
	tse->insn_count = ts->insn_count;
	tse->cyc_count = ts->cyc_count;
	tse->cp = cp;
	tse->no_call = no_call;
	tse->trace_end = trace_end;
	tse->non_call = false;
	tse->db_id = 0;

	return 0;
}

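/*
 * Pop entries to match a 'return'. Returns 0 when a matching entry is found
 * and reported (same return address, or same symbol when only one entry
 * remains), a negative error code on failure, or 1 when there is nothing to
 * pop, in which case the caller falls back to thread_stack__no_call_return().
 */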
static int thread_stack__pop_cp(struct thread *thread, struct thread_stack *ts,
				u64 ret_addr, u64 timestamp, u64 ref,
				struct symbol *sym)
{
	int err;

	if (!ts->cnt)
		return 1;

	if (ts->cnt == 1) {
		struct thread_stack_entry *tse = &ts->stack[0];

		if (tse->cp->sym == sym)
			return thread_stack__call_return(thread, ts, --ts->cnt,
							 timestamp, ref, false);
	}

	if (ts->stack[ts->cnt - 1].ret_addr == ret_addr &&
	    !ts->stack[ts->cnt - 1].non_call) {
		return thread_stack__call_return(thread, ts, --ts->cnt,
						 timestamp, ref, false);
	} else {
		size_t i = ts->cnt - 1;

		while (i--) {
			if (ts->stack[i].ret_addr != ret_addr ||
			    ts->stack[i].non_call)
				continue;
			i += 1;
			while (ts->cnt > i) {
				err = thread_stack__call_return(thread, ts,
								--ts->cnt,
								timestamp, ref,
								true);
				if (err)
					return err;
			}
			return thread_stack__call_return(thread, ts, --ts->cnt,
							 timestamp, ref, false);
		}
	}

	return 1;
}

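/*
 * Prime an empty stack: push a call path for the current location (the 'from'
 * address if there is one, otherwise the 'to' address) so that subsequent
 * calls and returns have a parent. The entry is marked no_call because the
 * corresponding 'call' was never seen.
 */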
static int thread_stack__bottom(struct thread_stack *ts,
				struct perf_sample *sample,
				struct addr_location *from_al,
				struct addr_location *to_al, u64 ref)
{
	struct call_path_root *cpr = ts->crp->cpr;
	struct call_path *cp;
	struct symbol *sym;
	u64 ip;

	if (sample->ip) {
		ip = sample->ip;
		sym = from_al->sym;
	} else if (sample->addr) {
		ip = sample->addr;
		sym = to_al->sym;
	} else {
		return 0;
	}

	cp = call_path__findnew(cpr, &cpr->call_path, sym, ip,
				ts->kernel_start);

	return thread_stack__push_cp(ts, ip, sample->time, ref, cp,
				     true, false);
}

static int thread_stack__pop_ks(struct thread *thread, struct thread_stack *ts,
				struct perf_sample *sample, u64 ref)
{
	u64 tm = sample->time;
	int err;

	/* Return to userspace, so pop all kernel addresses */
	while (thread_stack__in_kernel(ts)) {
		err = thread_stack__call_return(thread, ts, --ts->cnt,
						tm, ref, true);
		if (err)
			return err;
	}

	return 0;
}

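/*
 * Handle a 'return' for which no matching 'call' is on the stack. Kernel
 * entries are popped first if the return goes back to userspace, then the
 * stack is patched up according to where the missing 'call' most plausibly
 * happened: before the trace started, a 'return' being used as a jump (e.g. a
 * retpoline), or a call from the current parent that was simply not seen.
 */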
static int thread_stack__no_call_return(struct thread *thread,
					struct thread_stack *ts,
					struct perf_sample *sample,
					struct addr_location *from_al,
					struct addr_location *to_al, u64 ref)
{
	struct call_path_root *cpr = ts->crp->cpr;
	struct call_path *root = &cpr->call_path;
	struct symbol *fsym = from_al->sym;
	struct symbol *tsym = to_al->sym;
	struct call_path *cp, *parent;
	u64 ks = ts->kernel_start;
	u64 addr = sample->addr;
	u64 tm = sample->time;
	u64 ip = sample->ip;
	int err;

	if (ip >= ks && addr < ks) {
		/* Return to userspace, so pop all kernel addresses */
		err = thread_stack__pop_ks(thread, ts, sample, ref);
		if (err)
			return err;

		/* If the stack is empty, push the userspace address */
		if (!ts->cnt) {
			cp = call_path__findnew(cpr, root, tsym, addr, ks);
			return thread_stack__push_cp(ts, 0, tm, ref, cp, true,
						     false);
		}
	} else if (thread_stack__in_kernel(ts) && ip < ks) {
		/* Return to userspace, so pop all kernel addresses */
		err = thread_stack__pop_ks(thread, ts, sample, ref);
		if (err)
			return err;
	}

	if (ts->cnt)
		parent = ts->stack[ts->cnt - 1].cp;
	else
		parent = root;

	if (parent->sym == from_al->sym) {
		/*
		 * At the bottom of the stack, assume the missing 'call' was
		 * before the trace started. So, pop the current symbol and push
		 * the 'to' symbol.
		 */
		if (ts->cnt == 1) {
			err = thread_stack__call_return(thread, ts, --ts->cnt,
							tm, ref, false);
			if (err)
				return err;
		}

		if (!ts->cnt) {
			cp = call_path__findnew(cpr, root, tsym, addr, ks);

			return thread_stack__push_cp(ts, addr, tm, ref, cp,
						     true, false);
		}

		/*
		 * Otherwise assume the 'return' is being used as a jump (e.g.
		 * retpoline) and just push the 'to' symbol.
		 */
		cp = call_path__findnew(cpr, parent, tsym, addr, ks);

		err = thread_stack__push_cp(ts, 0, tm, ref, cp, true, false);
		if (!err)
			ts->stack[ts->cnt - 1].non_call = true;

		return err;
	}

	/*
	 * Assume 'parent' has not yet returned, so push 'to', and then push and
	 * pop 'from'.
	 */

	cp = call_path__findnew(cpr, parent, tsym, addr, ks);

	err = thread_stack__push_cp(ts, addr, tm, ref, cp, true, false);
	if (err)
		return err;

	cp = call_path__findnew(cpr, cp, fsym, ip, ks);

	err = thread_stack__push_cp(ts, ip, tm, ref, cp, true, false);
	if (err)
		return err;

	return thread_stack__call_return(thread, ts, --ts->cnt, tm, ref, false);
}

static int thread_stack__trace_begin(struct thread *thread,
				     struct thread_stack *ts, u64 timestamp,
				     u64 ref)
{
	struct thread_stack_entry *tse;
	int err;

	if (!ts->cnt)
		return 0;

	/* Pop trace end */
	tse = &ts->stack[ts->cnt - 1];
	if (tse->trace_end) {
		err = thread_stack__call_return(thread, ts, --ts->cnt,
						timestamp, ref, false);
		if (err)
			return err;
	}

	return 0;
}

static int thread_stack__trace_end(struct thread_stack *ts,
				   struct perf_sample *sample, u64 ref)
{
	struct call_path_root *cpr = ts->crp->cpr;
	struct call_path *cp;
	u64 ret_addr;

	/* No point having 'trace end' on the bottom of the stack */
	if (!ts->cnt || (ts->cnt == 1 && ts->stack[0].ref == ref))
		return 0;

	cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp, NULL, 0,
				ts->kernel_start);

	ret_addr = sample->ip + sample->insn_len;

	return thread_stack__push_cp(ts, ret_addr, sample->time, ref, cp,
				     false, true);
}

static bool is_x86_retpoline(const char *name)
{
	const char *p = strstr(name, "__x86_indirect_thunk_");

	return p == name || !strcmp(name, "__indirect_thunk_start");
}

/*
 * x86 retpoline functions pollute the call graph. This function removes them.
 * This does not handle function return thunks, nor is there any improvement
 * for the handling of inline thunks or extern thunks.
 */
static int thread_stack__x86_retpoline(struct thread_stack *ts,
				       struct perf_sample *sample,
				       struct addr_location *to_al)
{
	struct thread_stack_entry *tse = &ts->stack[ts->cnt - 1];
	struct call_path_root *cpr = ts->crp->cpr;
	struct symbol *sym = tse->cp->sym;
	struct symbol *tsym = to_al->sym;
	struct call_path *cp;

	if (sym && is_x86_retpoline(sym->name)) {
		/*
		 * This is an x86 retpoline fn. It pollutes the call graph by
		 * showing up everywhere there is an indirect branch, but does
		 * not itself mean anything. Here the top-of-stack is removed,
		 * by decrementing the stack count, and then further down, the
		 * resulting top-of-stack is replaced with the actual target.
		 * The result is that the retpoline functions will no longer
		 * appear in the call graph. Note this only affects the call
		 * graph, since all the original branches are left unchanged.
		 */
		ts->cnt -= 1;
		sym = ts->stack[ts->cnt - 2].cp->sym;
		if (sym && sym == tsym && to_al->addr != tsym->start) {
			/*
			 * Target is back to the middle of the symbol we came
			 * from so assume it is an indirect jmp and forget it
			 * altogether.
			 */
			ts->cnt -= 1;
			return 0;
		}
	} else if (sym && sym == tsym) {
		/*
		 * Target is back to the symbol we came from so assume it is an
		 * indirect jmp and forget it altogether.
		 */
		ts->cnt -= 1;
		return 0;
	}

	cp = call_path__findnew(cpr, ts->stack[ts->cnt - 2].cp, tsym,
				sample->addr, ts->kernel_start);
	if (!cp)
		return -ENOMEM;

	/* Replace the top-of-stack with the actual target */
	ts->stack[ts->cnt - 1].cp = cp;

	return 0;
}

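/*
 * Main entry point when a call/return processor is in use (e.g. for
 * db-export): consume one branch sample, maintain the call path stack, and
 * emit call/return information through crp->process(). Handles exec (comm
 * change), calls, returns, trace begin / end, x86 retpolines, and branches to
 * the start of a different symbol (e.g. tail calls optimized into jmps).
 */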
int thread_stack__process(struct thread *thread, struct comm *comm,
			  struct perf_sample *sample,
			  struct addr_location *from_al,
			  struct addr_location *to_al, u64 ref,
			  struct call_return_processor *crp)
{
	struct thread_stack *ts = thread__stack(thread, sample->cpu);
	enum retpoline_state_t rstate;
	int err = 0;

	if (ts && !ts->crp) {
		/* Supersede thread_stack__event() */
		thread_stack__reset(thread, ts);
		ts = NULL;
	}

	if (!ts) {
		ts = thread_stack__new(thread, sample->cpu, crp);
		if (!ts)
			return -ENOMEM;
		ts->comm = comm;
	}

	rstate = ts->rstate;
	if (rstate == X86_RETPOLINE_DETECTED)
		ts->rstate = X86_RETPOLINE_POSSIBLE;

	/* Flush stack on exec */
	if (ts->comm != comm && thread->pid_ == thread->tid) {
		err = __thread_stack__flush(thread, ts);
		if (err)
			return err;
		ts->comm = comm;
	}

	/* If the stack is empty, put the current symbol on the stack */
	if (!ts->cnt) {
		err = thread_stack__bottom(ts, sample, from_al, to_al, ref);
		if (err)
			return err;
	}

	ts->branch_count += 1;
	ts->insn_count += sample->insn_cnt;
	ts->cyc_count += sample->cyc_cnt;
	ts->last_time = sample->time;

	if (sample->flags & PERF_IP_FLAG_CALL) {
		bool trace_end = sample->flags & PERF_IP_FLAG_TRACE_END;
		struct call_path_root *cpr = ts->crp->cpr;
		struct call_path *cp;
		u64 ret_addr;

		if (!sample->ip || !sample->addr)
			return 0;

		ret_addr = sample->ip + sample->insn_len;
		if (ret_addr == sample->addr)
			return 0; /* Zero-length calls are excluded */

		cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp,
					to_al->sym, sample->addr,
					ts->kernel_start);
		err = thread_stack__push_cp(ts, ret_addr, sample->time, ref,
					    cp, false, trace_end);

		/*
		 * A call to the same symbol, but not to the start of the
		 * symbol, may be the start of an x86 retpoline.
		 */
		if (!err && rstate == X86_RETPOLINE_POSSIBLE && to_al->sym &&
		    from_al->sym == to_al->sym &&
		    to_al->addr != to_al->sym->start)
			ts->rstate = X86_RETPOLINE_DETECTED;

	} else if (sample->flags & PERF_IP_FLAG_RETURN) {
		if (!sample->addr) {
			u32 return_from_kernel = PERF_IP_FLAG_SYSCALLRET |
						 PERF_IP_FLAG_INTERRUPT;

			if (!(sample->flags & return_from_kernel))
				return 0;

			/* Pop kernel stack */
			return thread_stack__pop_ks(thread, ts, sample, ref);
		}

		if (!sample->ip)
			return 0;

		/* x86 retpoline 'return' doesn't match the stack */
		if (rstate == X86_RETPOLINE_DETECTED && ts->cnt > 2 &&
		    ts->stack[ts->cnt - 1].ret_addr != sample->addr)
			return thread_stack__x86_retpoline(ts, sample, to_al);

		err = thread_stack__pop_cp(thread, ts, sample->addr,
					   sample->time, ref, from_al->sym);
		if (err) {
			if (err < 0)
				return err;
			err = thread_stack__no_call_return(thread, ts, sample,
							   from_al, to_al, ref);
		}
	} else if (sample->flags & PERF_IP_FLAG_TRACE_BEGIN) {
		err = thread_stack__trace_begin(thread, ts, sample->time, ref);
	} else if (sample->flags & PERF_IP_FLAG_TRACE_END) {
		err = thread_stack__trace_end(ts, sample, ref);
	} else if (sample->flags & PERF_IP_FLAG_BRANCH &&
		   from_al->sym != to_al->sym && to_al->sym &&
		   to_al->addr == to_al->sym->start) {
		struct call_path_root *cpr = ts->crp->cpr;
		struct call_path *cp;

		/*
		 * The compiler might optimize a call/ret combination by making
		 * it a jmp. Make that visible by recording on the stack a
		 * branch to the start of a different symbol. Note, that means
		 * when a ret pops the stack, all jmps must be popped off first.
		 */
		cp = call_path__findnew(cpr, ts->stack[ts->cnt - 1].cp,
					to_al->sym, sample->addr,
					ts->kernel_start);
		err = thread_stack__push_cp(ts, 0, sample->time, ref, cp, false,
					    false);
		if (!err)
			ts->stack[ts->cnt - 1].non_call = true;
	}

	return err;
}

size_t thread_stack__depth(struct thread *thread, int cpu)
{
	struct thread_stack *ts = thread__stack(thread, cpu);

	if (!ts)
		return 0;
	return ts->cnt;
}