Loading...
1// SPDX-License-Identifier: GPL-2.0
2/* Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. */
3
4#include <linux/sched/debug.h>
5#include <linux/sched/task_stack.h>
6#include <linux/stacktrace.h>
7#include <linux/ftrace.h>
8
9void save_stack_trace(struct stack_trace *trace)
10{
11 save_stack_trace_tsk(current, trace);
12}
13EXPORT_SYMBOL_GPL(save_stack_trace);
14
/*
 * Walk @tsk's kernel stack via frame-pointer links and record return
 * addresses into @trace.
 *
 * For the current task the walk starts from the live frame pointer (the
 * C-SKY frame pointer is register r8); for a sleeping task it starts from
 * the frame pointer saved at context switch.  Scheduler-internal frames
 * are recorded only when unwinding the current task (savesched == 1);
 * for other tasks they are filtered out so callers see where the task
 * actually sleeps.
 */
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	unsigned long *fp, *stack_start, *stack_end;
	unsigned long addr;
	int skip = trace->skip;
	int savesched;
	int graph_idx = 0;

	if (tsk == current) {
		/* r8 holds the current frame pointer on C-SKY. */
		asm volatile("mov %0, r8\n":"=r"(fp));
		savesched = 1;
	} else {
		fp = (unsigned long *)thread_saved_fp(tsk);
		savesched = 0;
	}

	/* Bound the walk to the kernel stack page(s) containing fp. */
	addr = (unsigned long) fp & THREAD_MASK;
	stack_start = (unsigned long *) addr;
	stack_end = (unsigned long *) (addr + THREAD_SIZE);

	while (fp > stack_start && fp < stack_end) {
		unsigned long lpp, fpp;

		/*
		 * Frame layout assumed here: fp[0] = caller's frame
		 * pointer, fp[1] = return address.
		 */
		fpp = fp[0];
		lpp = fp[1];
		if (!__kernel_text_address(lpp))
			break;
		else
			/* Map ftrace-graph trampoline back to the real caller. */
			lpp = ftrace_graph_ret_addr(tsk, &graph_idx, lpp, NULL);

		if (savesched || !in_sched_functions(lpp)) {
			if (skip) {
				skip--;
			} else {
				/*
				 * NOTE(review): entry is stored before the
				 * capacity test, so max_entries == 0 would
				 * write one slot out of bounds — confirm
				 * callers always pass max_entries > 0.
				 */
				trace->entries[trace->nr_entries++] = lpp;
				if (trace->nr_entries >= trace->max_entries)
					break;
			}
		}
		fp = (unsigned long *)fpp;
	}
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
1// SPDX-License-Identifier: GPL-2.0
2
3#include <linux/sched/debug.h>
4#include <linux/sched/task_stack.h>
5#include <linux/stacktrace.h>
6#include <linux/ftrace.h>
7#include <linux/ptrace.h>
8
9#ifdef CONFIG_FRAME_POINTER
10
/*
 * Frame record laid down on the kernel stack by a function prologue;
 * walk_stackframe() reads it at the address held in the frame pointer.
 */
struct stackframe {
	unsigned long fp;	/* caller's frame pointer */
	unsigned long ra;	/* return address into the caller */
};
15
/*
 * Frame-pointer based unwinder.  Calls @fn(pc, @arg) for every frame,
 * innermost first, until @fn returns true, pc leaves kernel text, or the
 * frame pointer fails validation.
 *
 * Starting state comes from, in priority order: @regs if given; the live
 * registers when unwinding the current task (or @task == NULL); otherwise
 * the context saved in __switch_to for a blocked task.
 *
 * notrace: this function is itself used by ftrace/stacktrace paths and
 * must not be instrumented.
 */
void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
			     bool (*fn)(unsigned long, void *), void *arg)
{
	unsigned long fp, sp, pc;

	if (regs) {
		fp = frame_pointer(regs);
		sp = user_stack_pointer(regs);
		pc = instruction_pointer(regs);
	} else if (task == NULL || task == current) {
		/* Read our own sp and frame pointer (r8 on C-SKY). */
		const register unsigned long current_sp __asm__ ("sp");
		const register unsigned long current_fp __asm__ ("r8");
		fp = current_fp;
		sp = current_sp;
		pc = (unsigned long)walk_stackframe;
	} else {
		/* task blocked in __switch_to */
		fp = thread_saved_fp(task);
		sp = thread_saved_sp(task);
		pc = thread_saved_lr(task);
	}

	for (;;) {
		unsigned long low, high;
		struct stackframe *frame;

		if (unlikely(!__kernel_text_address(pc) || fn(pc, arg)))
			break;

		/*
		 * Validate frame pointer: must lie between the current sp
		 * and the top of this kernel stack, and be word-aligned.
		 */
		low = sp;
		high = ALIGN(sp, THREAD_SIZE);
		if (unlikely(fp < low || fp > high || fp & 0x3))
			break;
		/* Unwind stack frame */
		frame = (struct stackframe *)fp;
		sp = fp;
		fp = frame->fp;
		/*
		 * Translate an ftrace-graph return trampoline back to the
		 * real return address.  NOTE(review): the retp hint uses
		 * (fp - 8) computed from the *new* fp, not &frame->ra —
		 * presumably intentional for this ABI; verify against the
		 * ftrace_graph_ret_addr() contract.
		 */
		pc = ftrace_graph_ret_addr(current, NULL, frame->ra,
					   (unsigned long *)(fp - 8));
	}
}
58
59#else /* !CONFIG_FRAME_POINTER */
60
/*
 * Fallback unwinder for kernels built without frame pointers: scan every
 * word of the kernel stack and report each value that looks like a kernel
 * text address.  Imprecise — it can report stale return addresses left on
 * the stack — but needs no frame records.
 *
 * Calls @fn(pc, @arg) for each candidate; stops when @fn returns true or
 * the end of the stack is reached.
 */
static void notrace walk_stackframe(struct task_struct *task,
	struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg)
{
	unsigned long sp, pc;
	unsigned long *ksp;

	if (regs) {
		sp = user_stack_pointer(regs);
		pc = instruction_pointer(regs);
	} else if (task == NULL || task == current) {
		/* Start from our own stack pointer. */
		const register unsigned long current_sp __asm__ ("sp");
		sp = current_sp;
		pc = (unsigned long)walk_stackframe;
	} else {
		/* task blocked in __switch_to */
		sp = thread_saved_sp(task);
		pc = thread_saved_lr(task);
	}

	/* A misaligned sp means the context is corrupt — give up. */
	if (unlikely(sp & 0x3))
		return;

	ksp = (unsigned long *)sp;
	while (!kstack_end(ksp)) {
		if (__kernel_text_address(pc) && unlikely(fn(pc, arg)))
			break;
		/*
		 * Next candidate: the stacked word minus 4, converting a
		 * return address to the address of its call instruction.
		 */
		pc = (*ksp++) - 0x4;
	}
}
90#endif /* CONFIG_FRAME_POINTER */
91
92static bool print_trace_address(unsigned long pc, void *arg)
93{
94 print_ip_sym((const char *)arg, pc);
95 return false;
96}
97
98void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
99{
100 pr_cont("Call Trace:\n");
101 walk_stackframe(task, NULL, print_trace_address, (void *)loglvl);
102}
103
104static bool save_wchan(unsigned long pc, void *arg)
105{
106 if (!in_sched_functions(pc)) {
107 unsigned long *p = arg;
108 *p = pc;
109 return true;
110 }
111 return false;
112}
113
114unsigned long get_wchan(struct task_struct *task)
115{
116 unsigned long pc = 0;
117
118 if (likely(task && task != current && task->state != TASK_RUNNING))
119 walk_stackframe(task, NULL, save_wchan, &pc);
120 return pc;
121}
122
123#ifdef CONFIG_STACKTRACE
124static bool __save_trace(unsigned long pc, void *arg, bool nosched)
125{
126 struct stack_trace *trace = arg;
127
128 if (unlikely(nosched && in_sched_functions(pc)))
129 return false;
130 if (unlikely(trace->skip > 0)) {
131 trace->skip--;
132 return false;
133 }
134
135 trace->entries[trace->nr_entries++] = pc;
136 return (trace->nr_entries >= trace->max_entries);
137}
138
139static bool save_trace(unsigned long pc, void *arg)
140{
141 return __save_trace(pc, arg, false);
142}
143
/*
 * Save stack-backtrace addresses into a stack_trace buffer.
 */
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	/* No pt_regs: unwind from @tsk's saved (or the live) context. */
	walk_stackframe(tsk, NULL, save_trace, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
152
/*
 * Save a backtrace of the current task into @trace; NULL task means
 * "current" to walk_stackframe().
 */
void save_stack_trace(struct stack_trace *trace)
{
	save_stack_trace_tsk(NULL, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
158
159#endif /* CONFIG_STACKTRACE */