// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2008 ARM Limited
 * Copyright (C) 2014 Regents of the University of California
 */

#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/ftrace.h>

#ifdef CONFIG_FRAME_POINTER

struct stackframe {
	unsigned long fp;
	unsigned long ra;
};
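/*
 * Walk the kernel stack of @task (or the frame described by @regs, when
 * non-NULL), calling @fn for every return address found; @fn returns true
 * to stop the walk.  With frame pointers enabled, each function is expected
 * to store its return address and the previous frame pointer just below the
 * new frame pointer, so the record described by struct stackframe above sits
 * at fp - sizeof(struct stackframe), which is why the loop below reads
 * (struct stackframe *)fp - 1.
 *
 * Hypothetical example callback (not part of this file) that counts frames:
 *
 *	static bool count_frame(unsigned long pc, void *arg)
 *	{
 *		(*(unsigned int *)arg)++;
 *		return false;	/\* false: keep walking *\/
 *	}
 *
 *	unsigned int depth = 0;
 *	walk_stackframe(NULL, NULL, count_frame, &depth);
 */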
void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
			     bool (*fn)(unsigned long, void *), void *arg)
{
	unsigned long fp, sp, pc;

	if (regs) {
		fp = frame_pointer(regs);
		sp = user_stack_pointer(regs);
		pc = instruction_pointer(regs);
	} else if (task == NULL || task == current) {
		const register unsigned long current_sp __asm__ ("sp");
		fp = (unsigned long)__builtin_frame_address(0);
		sp = current_sp;
		pc = (unsigned long)walk_stackframe;
	} else {
		/* task blocked in __switch_to */
		fp = task->thread.s[0];
		sp = task->thread.sp;
		pc = task->thread.ra;
	}

	for (;;) {
		unsigned long low, high;
		struct stackframe *frame;

		if (unlikely(!__kernel_text_address(pc) || fn(pc, arg)))
			break;

		/* Validate frame pointer */
		low = sp + sizeof(struct stackframe);
		high = ALIGN(sp, THREAD_SIZE);
		if (unlikely(fp < low || fp > high || fp & 0x7))
			break;
		/* Unwind stack frame */
		frame = (struct stackframe *)fp - 1;
		sp = fp;
		fp = frame->fp;
		pc = ftrace_graph_ret_addr(current, NULL, frame->ra,
					   (unsigned long *)(fp - 8));
	}
}

#else /* !CONFIG_FRAME_POINTER */
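/*
 * Without frame pointers there is no reliable unwind metadata, so the
 * fallback below scans every word on the kernel stack and reports anything
 * that resolves to a kernel text address.  This can over-report stale return
 * addresses left on the stack, but it needs no cooperation from the compiler.
 */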
static void notrace walk_stackframe(struct task_struct *task,
	struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg)
{
	unsigned long sp, pc;
	unsigned long *ksp;

	if (regs) {
		sp = user_stack_pointer(regs);
		pc = instruction_pointer(regs);
	} else if (task == NULL || task == current) {
		const register unsigned long current_sp __asm__ ("sp");
		sp = current_sp;
		pc = (unsigned long)walk_stackframe;
	} else {
		/* task blocked in __switch_to */
		sp = task->thread.sp;
		pc = task->thread.ra;
	}

	if (unlikely(sp & 0x7))
		return;

	ksp = (unsigned long *)sp;
	while (!kstack_end(ksp)) {
		if (__kernel_text_address(pc) && unlikely(fn(pc, arg)))
			break;
		pc = (*ksp++) - 0x4;
	}
}

#endif /* CONFIG_FRAME_POINTER */


static bool print_trace_address(unsigned long pc, void *arg)
{
	print_ip_sym(pc);
	return false;
}

void show_stack(struct task_struct *task, unsigned long *sp)
{
	pr_cont("Call Trace:\n");
	walk_stackframe(task, NULL, print_trace_address, NULL);
}

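/*
 * get_wchan() reports the "wait channel": the first program counter outside
 * of scheduler code found on a blocked task's stack.  save_wchan() is the
 * walk_stackframe() callback that records that address and stops the walk.
 */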
static bool save_wchan(unsigned long pc, void *arg)
{
	if (!in_sched_functions(pc)) {
		unsigned long *p = arg;
		*p = pc;
		return true;
	}
	return false;
}

unsigned long get_wchan(struct task_struct *task)
{
	unsigned long pc = 0;

	if (likely(task && task != current && task->state != TASK_RUNNING))
		walk_stackframe(task, NULL, save_wchan, &pc);
	return pc;
}


#ifdef CONFIG_STACKTRACE

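/*
 * __save_trace() stores each walked PC into the stack_trace entry buffer,
 * honoring trace->skip for the leading entries, and returns true (stop the
 * walk) once trace->max_entries is reached.  With @nosched set, addresses
 * inside scheduler code are ignored.
 */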
static bool __save_trace(unsigned long pc, void *arg, bool nosched)
{
	struct stack_trace *trace = arg;

	if (unlikely(nosched && in_sched_functions(pc)))
		return false;
	if (unlikely(trace->skip > 0)) {
		trace->skip--;
		return false;
	}

	trace->entries[trace->nr_entries++] = pc;
	return (trace->nr_entries >= trace->max_entries);
}

static bool save_trace(unsigned long pc, void *arg)
{
	return __save_trace(pc, arg, false);
}

/*
 * Save stack-backtrace addresses into a stack_trace buffer.
 */
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	walk_stackframe(tsk, NULL, save_trace, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);

void save_stack_trace(struct stack_trace *trace)
{
	save_stack_trace_tsk(NULL, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace);

#endif /* CONFIG_STACKTRACE */
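/*
 * A later revision of the same file follows.  Notable differences: the
 * current stack pointer is read through the sp_in_global register variable,
 * callbacks take (void *arg, unsigned long pc) and return true to continue
 * walking, printing carries a printk log level, and the open-coded
 * CONFIG_STACKTRACE helpers are replaced by arch_stack_walk() for the
 * generic ARCH_STACKWALK interface.
 */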
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2008 ARM Limited
 * Copyright (C) 2014 Regents of the University of California
 */

#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/ftrace.h>

#include <asm/stacktrace.h>

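/*
 * Global register variable bound to the stack pointer, so plain C code can
 * read the current sp without inline assembly at each use.
 */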
register unsigned long sp_in_global __asm__("sp");

#ifdef CONFIG_FRAME_POINTER

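/*
 * Same walker as above, converted to the consume-style callback used by the
 * generic stacktrace code: @fn takes (arg, pc) and returns true to continue
 * or false to stop.  struct stackframe is now provided by <asm/stacktrace.h>.
 */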
void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
			     bool (*fn)(void *, unsigned long), void *arg)
{
	unsigned long fp, sp, pc;

	if (regs) {
		fp = frame_pointer(regs);
		sp = user_stack_pointer(regs);
		pc = instruction_pointer(regs);
	} else if (task == NULL || task == current) {
		fp = (unsigned long)__builtin_frame_address(1);
		sp = (unsigned long)__builtin_frame_address(0);
		pc = (unsigned long)__builtin_return_address(0);
	} else {
		/* task blocked in __switch_to */
		fp = task->thread.s[0];
		sp = task->thread.sp;
		pc = task->thread.ra;
	}

	for (;;) {
		unsigned long low, high;
		struct stackframe *frame;

		if (unlikely(!__kernel_text_address(pc) || !fn(arg, pc)))
			break;

		/* Validate frame pointer */
		low = sp + sizeof(struct stackframe);
		high = ALIGN(sp, THREAD_SIZE);
		if (unlikely(fp < low || fp > high || fp & 0x7))
			break;
		/* Unwind stack frame */
		frame = (struct stackframe *)fp - 1;
		sp = fp;
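		/*
		 * Leaf functions need not spill ra, so their frame record
		 * holds only the caller's fp (in the slot the walker reads
		 * as ->ra).  For the innermost frame of an exception, detect
		 * this via the bogus (misaligned) ->fp slot and take the
		 * return address from regs->ra instead.
		 */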
		if (regs && (regs->epc == pc) && (frame->fp & 0x7)) {
			fp = frame->ra;
			pc = regs->ra;
		} else {
			fp = frame->fp;
			pc = ftrace_graph_ret_addr(current, NULL, frame->ra,
						   (unsigned long *)(fp - 8));
		}

	}
}

#else /* !CONFIG_FRAME_POINTER */

void notrace walk_stackframe(struct task_struct *task,
	struct pt_regs *regs, bool (*fn)(void *, unsigned long), void *arg)
{
	unsigned long sp, pc;
	unsigned long *ksp;

	if (regs) {
		sp = user_stack_pointer(regs);
		pc = instruction_pointer(regs);
	} else if (task == NULL || task == current) {
		sp = sp_in_global;
		pc = (unsigned long)walk_stackframe;
	} else {
		/* task blocked in __switch_to */
		sp = task->thread.sp;
		pc = task->thread.ra;
	}

	if (unlikely(sp & 0x7))
		return;

	ksp = (unsigned long *)sp;
	while (!kstack_end(ksp)) {
		if (__kernel_text_address(pc) && unlikely(!fn(arg, pc)))
			break;
		pc = (*ksp++) - 0x4;
	}
}

#endif /* CONFIG_FRAME_POINTER */

static bool print_trace_address(void *arg, unsigned long pc)
{
	const char *loglvl = arg;

	print_ip_sym(loglvl, pc);
	return true;
}

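/*
 * dump_backtrace() prints every kernel text address found on the stack of
 * @task (or at @regs) at the requested printk log level; show_stack() is
 * the hook the generic code calls for the same purpose.
 */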
noinline void dump_backtrace(struct pt_regs *regs, struct task_struct *task,
			     const char *loglvl)
{
	walk_stackframe(task, regs, print_trace_address, (void *)loglvl);
}

void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
{
	pr_cont("%sCall Trace:\n", loglvl);
	dump_backtrace(NULL, task, loglvl);
}

static bool save_wchan(void *arg, unsigned long pc)
{
	if (!in_sched_functions(pc)) {
		unsigned long *p = arg;
		*p = pc;
		return false;
	}
	return true;
}

unsigned long get_wchan(struct task_struct *task)
{
	unsigned long pc = 0;

	if (likely(task && task != current && !task_is_running(task))) {
		if (!try_get_task_stack(task))
			return 0;
		walk_stackframe(task, NULL, save_wchan, &pc);
		put_task_stack(task);
	}
	return pc;
}

#ifdef CONFIG_STACKTRACE

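/*
 * With the generic ARCH_STACKWALK interface the core stacktrace code owns
 * the entry buffer and skip handling; the architecture only has to feed it
 * return addresses, which maps directly onto walk_stackframe().
 */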
noinline void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
			      struct task_struct *task, struct pt_regs *regs)
{
	walk_stackframe(task, regs, consume_entry, cookie);
}

#endif /* CONFIG_STACKTRACE */