v3.5.6
/*
 * Stack trace management functions
 *
 *  Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#include <linux/sched.h>
#include <linux/stacktrace.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <asm/stacktrace.h>

static int save_stack_stack(void *data, char *name)
{
	return 0;
}

static void
__save_stack_address(void *data, unsigned long addr, bool reliable, bool nosched)
{
	struct stack_trace *trace = data;
#ifdef CONFIG_FRAME_POINTER
	if (!reliable)
		return;
#endif
	if (nosched && in_sched_functions(addr))
		return;

	if (trace->skip > 0) {
		trace->skip--;
		return;
	}
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = addr;
}

static void save_stack_address(void *data, unsigned long addr, int reliable)
{
	return __save_stack_address(data, addr, reliable, false);
}

static void
save_stack_address_nosched(void *data, unsigned long addr, int reliable)
{
	return __save_stack_address(data, addr, reliable, true);
}

static const struct stacktrace_ops save_stack_ops = {
	.stack		= save_stack_stack,
	.address	= save_stack_address,
	.walk_stack	= print_context_stack,
};

static const struct stacktrace_ops save_stack_ops_nosched = {
	.stack		= save_stack_stack,
	.address	= save_stack_address_nosched,
	.walk_stack	= print_context_stack,
};

/*
 * Save stack-backtrace addresses into a stack_trace buffer.
 */
void save_stack_trace(struct stack_trace *trace)
{
	dump_trace(current, NULL, NULL, 0, &save_stack_ops, trace);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace);

void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
	dump_trace(current, regs, NULL, 0, &save_stack_ops, trace);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}

void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	dump_trace(tsk, NULL, NULL, 0, &save_stack_ops_nosched, trace);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);

/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */

struct stack_frame_user {
	const void __user	*next_fp;
	unsigned long		ret_addr;
};

static int
copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
{
	int ret;

	if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
		return 0;

	ret = 1;
	pagefault_disable();
	if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
		ret = 0;
	pagefault_enable();

	return ret;
}

static inline void __save_stack_trace_user(struct stack_trace *trace)
{
	const struct pt_regs *regs = task_pt_regs(current);
	const void __user *fp = (const void __user *)regs->bp;

	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = regs->ip;

	while (trace->nr_entries < trace->max_entries) {
		struct stack_frame_user frame;

		frame.next_fp = NULL;
		frame.ret_addr = 0;
		if (!copy_stack_frame(fp, &frame))
			break;
		if ((unsigned long)fp < regs->sp)
			break;
		if (frame.ret_addr) {
			trace->entries[trace->nr_entries++] =
				frame.ret_addr;
		}
		if (fp == frame.next_fp)
			break;
		fp = frame.next_fp;
	}
}

void save_stack_trace_user(struct stack_trace *trace)
{
	/*
	 * Trace user stack if we are not a kernel thread
	 */
	if (current->mm) {
		__save_stack_trace_user(trace);
	}
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
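A minimal caller sketch, assuming only the struct stack_trace fields used above (entries, max_entries, nr_entries, skip) and the generic print_stack_trace() helper declared in <linux/stacktrace.h>; the buffer size, skip value, and function name are illustrative and not part of the file:

#include <linux/kernel.h>	/* ARRAY_SIZE() */
#include <linux/stacktrace.h>

/* Hypothetical example: capture and print the current kernel stack. */
static unsigned long example_entries[16];

static void example_dump_current_stack(void)
{
	struct stack_trace trace = {
		.nr_entries	= 0,
		.max_entries	= ARRAY_SIZE(example_entries),
		.entries	= example_entries,
		.skip		= 1,	/* drop the innermost frame(s), e.g. this helper */
	};

	save_stack_trace(&trace);
	print_stack_trace(&trace, 0);
}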
v4.17
/*
 * Stack trace management functions
 *
 *  Copyright (C) 2006-2009 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 */
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/export.h>
#include <linux/uaccess.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>

static int save_stack_address(struct stack_trace *trace, unsigned long addr,
			      bool nosched)
{
	if (nosched && in_sched_functions(addr))
		return 0;

	if (trace->skip > 0) {
		trace->skip--;
		return 0;
	}

	if (trace->nr_entries >= trace->max_entries)
		return -1;

	trace->entries[trace->nr_entries++] = addr;
	return 0;
}

static void noinline __save_stack_trace(struct stack_trace *trace,
			       struct task_struct *task, struct pt_regs *regs,
			       bool nosched)
{
	struct unwind_state state;
	unsigned long addr;

	if (regs)
		save_stack_address(trace, regs->ip, nosched);

	for (unwind_start(&state, task, regs, NULL); !unwind_done(&state);
	     unwind_next_frame(&state)) {
		addr = unwind_get_return_address(&state);
		if (!addr || save_stack_address(trace, addr, nosched))
			break;
	}

	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}

/*
 * Save stack-backtrace addresses into a stack_trace buffer.
 */
void save_stack_trace(struct stack_trace *trace)
{
	trace->skip++;
	__save_stack_trace(trace, current, NULL, false);
}
EXPORT_SYMBOL_GPL(save_stack_trace);

void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
	__save_stack_trace(trace, current, regs, false);
}

void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	if (!try_get_task_stack(tsk))
		return;

	if (tsk == current)
		trace->skip++;
	__save_stack_trace(trace, tsk, NULL, true);

	put_task_stack(tsk);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);

#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE

#define STACKTRACE_DUMP_ONCE(task) ({				\
	static bool __section(.data.unlikely) __dumped;		\
								\
	if (!__dumped) {					\
		__dumped = true;				\
		WARN_ON(1);					\
		show_stack(task, NULL);				\
	}							\
})

static int __always_inline
__save_stack_trace_reliable(struct stack_trace *trace,
			    struct task_struct *task)
{
	struct unwind_state state;
	struct pt_regs *regs;
	unsigned long addr;

	for (unwind_start(&state, task, NULL, NULL); !unwind_done(&state);
	     unwind_next_frame(&state)) {

		regs = unwind_get_entry_regs(&state, NULL);
		if (regs) {
			/*
			 * Kernel mode registers on the stack indicate an
			 * in-kernel interrupt or exception (e.g., preemption
			 * or a page fault), which can make frame pointers
			 * unreliable.
			 */
			if (!user_mode(regs))
				return -EINVAL;

			/*
			 * The last frame contains the user mode syscall
			 * pt_regs.  Skip it and finish the unwind.
			 */
			unwind_next_frame(&state);
			if (!unwind_done(&state)) {
				STACKTRACE_DUMP_ONCE(task);
				return -EINVAL;
			}
			break;
		}

		addr = unwind_get_return_address(&state);

		/*
		 * A NULL or invalid return address probably means there's some
		 * generated code which __kernel_text_address() doesn't know
		 * about.
		 */
		if (!addr) {
			STACKTRACE_DUMP_ONCE(task);
			return -EINVAL;
		}

		if (save_stack_address(trace, addr, false))
			return -EINVAL;
	}

	/* Check for stack corruption */
	if (unwind_error(&state)) {
		STACKTRACE_DUMP_ONCE(task);
		return -EINVAL;
	}

	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;

	return 0;
}

/*
 * This function returns an error if it detects any unreliable features of the
 * stack.  Otherwise it guarantees that the stack trace is reliable.
 *
 * If the task is not 'current', the caller *must* ensure the task is inactive.
 */
int save_stack_trace_tsk_reliable(struct task_struct *tsk,
				  struct stack_trace *trace)
{
	int ret;

	/*
	 * If the task doesn't have a stack (e.g., a zombie), the stack is
	 * "reliably" empty.
	 */
	if (!try_get_task_stack(tsk))
		return 0;

	ret = __save_stack_trace_reliable(trace, tsk);

	put_task_stack(tsk);

	return ret;
}
#endif /* CONFIG_HAVE_RELIABLE_STACKTRACE */

/* Userspace stacktrace - based on kernel/trace/trace_sysprof.c */

struct stack_frame_user {
	const void __user	*next_fp;
	unsigned long		ret_addr;
};

static int
copy_stack_frame(const void __user *fp, struct stack_frame_user *frame)
{
	int ret;

	if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
		return 0;

	ret = 1;
	pagefault_disable();
	if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
		ret = 0;
	pagefault_enable();

	return ret;
}

static inline void __save_stack_trace_user(struct stack_trace *trace)
{
	const struct pt_regs *regs = task_pt_regs(current);
	const void __user *fp = (const void __user *)regs->bp;

	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = regs->ip;

	while (trace->nr_entries < trace->max_entries) {
		struct stack_frame_user frame;

		frame.next_fp = NULL;
		frame.ret_addr = 0;
		if (!copy_stack_frame(fp, &frame))
			break;
		if ((unsigned long)fp < regs->sp)
			break;
		if (frame.ret_addr) {
			trace->entries[trace->nr_entries++] =
				frame.ret_addr;
		}
		if (fp == frame.next_fp)
			break;
		fp = frame.next_fp;
	}
}

void save_stack_trace_user(struct stack_trace *trace)
{
	/*
	 * Trace user stack if we are not a kernel thread
	 */
	if (current->mm) {
		__save_stack_trace_user(trace);
	}
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
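For the v4.17 reliable variant, a hypothetical caller sketch: save_stack_trace_tsk_reliable() is only built when CONFIG_HAVE_RELIABLE_STACKTRACE is set, and it returns 0 on success or an error (e.g. -EINVAL) when the stack cannot be unwound reliably, as shown above. The function and buffer names below are illustrative, not part of the file:

#include <linux/sched.h>
#include <linux/stacktrace.h>

/* Hypothetical example: refuse to proceed unless the unwind is trustworthy. */
static int example_reliable_trace(struct task_struct *task,
				  unsigned long *buf, unsigned int len)
{
	struct stack_trace trace = {
		.nr_entries	= 0,
		.max_entries	= len,
		.entries	= buf,
		.skip		= 0,
	};
	int ret;

	/* Per the comment above, 'task' must be inactive unless it is current. */
	ret = save_stack_trace_tsk_reliable(task, &trace);
	if (ret)
		return ret;	/* e.g. -EINVAL: an unreliable frame was found */

	return trace.nr_entries;	/* count includes the ULONG_MAX terminator if it fit */
}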