/* Source listing: arch/arc/kernel/stacktrace.c — Linux v5.9 */
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 *	stacktrace.c : stacktracing APIs needed by rest of kernel
  4 *			(wrappers over ARC dwarf based unwinder)
  5 *
  6 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
  7 *
  8 *  vineetg: aug 2009
  9 *  -Implemented CONFIG_STACKTRACE APIs, primarily save_stack_trace_tsk( )
 10 *   for displaying task's kernel mode call stack in /proc/<pid>/stack
 11 *  -Iterator based approach to have single copy of unwinding core and APIs
 12 *   needing unwinding, implement the logic in iterator regarding:
 13 *      = which frame onwards to start capture
 14 *      = which frame to stop capturing (wchan)
 15 *      = specifics of data structs where trace is saved(CONFIG_STACKTRACE etc)
 16 *
 17 *  vineetg: March 2009
 18 *  -Implemented correct versions of thread_saved_pc() and get_wchan()
 19 *
 20 *  rajeshwarr: 2008
 21 *  -Initial implementation
 22 */
 23
 24#include <linux/ptrace.h>
 25#include <linux/export.h>
 26#include <linux/stacktrace.h>
 27#include <linux/kallsyms.h>
 28#include <linux/sched/debug.h>
 29
 30#include <asm/arcregs.h>
 31#include <asm/unwind.h>
 32#include <asm/switch_to.h>
 33
 34/*-------------------------------------------------------------------------
 35 *              Unwinder Iterator
 36 *-------------------------------------------------------------------------
 37 */
 38
 39#ifdef CONFIG_ARC_DW2_UNWIND
 40
/*
 * Seed the dwarf unwinder with a starting register state (FP/SP/BLINK/PC),
 * chosen by how the unwind was requested:
 *   tsk == NULL && regs == NULL  ->  unwind the current call chain
 *   regs == NULL                 ->  unwind task @tsk (assumed sleeping)
 *   otherwise                    ->  unwind from the intr/exception @regs
 *
 * NOTE(review): in the task case nothing verifies @tsk is actually sleeping;
 * if it is running on another CPU the seeded register state is stale — later
 * kernels guard this path with task_is_running(). Confirm before relying on
 * traces of running tasks.
 */
static void seed_unwind_frame_info(struct task_struct *tsk,
				   struct pt_regs *regs,
				   struct unwind_frame_info *frame_info)
{
	/*
	 * synchronous unwinding (e.g. dump_stack)
	 *  - uses current values of SP and friends
	 */
	if (tsk == NULL && regs == NULL) {
		unsigned long fp, sp, blink, ret;
		frame_info->task = current;

		/* ARC core regs: r27 = FP, r28 = SP, r31 = BLINK (return
		 * address); r63 presumably reads the current PC (PCL) —
		 * verify against the ARC ISA reference.
		 */
		__asm__ __volatile__(
			"mov %0,r27\n\t"
			"mov %1,r28\n\t"
			"mov %2,r31\n\t"
			"mov %3,r63\n\t"
			: "=r"(fp), "=r"(sp), "=r"(blink), "=r"(ret)
		);

		frame_info->regs.r27 = fp;
		frame_info->regs.r28 = sp;
		frame_info->regs.r31 = blink;
		frame_info->regs.r63 = ret;
		frame_info->call_frame = 0;
	} else if (regs == NULL) {
		/*
		 * Asynchronous unwinding of sleeping task
		 *  - Gets SP etc from task's pt_regs (saved bottom of kernel
		 *    mode stack of task)
		 */
		frame_info->task = tsk;

		frame_info->regs.r27 = TSK_K_FP(tsk);
		frame_info->regs.r28 = TSK_K_ESP(tsk);
		frame_info->regs.r31 = TSK_K_BLINK(tsk);
		/* a sleeping task was switched out inside __switch_to, so
		 * resume the unwind as if stopped at its entry */
		frame_info->regs.r63 = (unsigned int)__switch_to;

		/* In the prologue of __switch_to, first FP is saved on stack
		 * and then SP is copied to FP. Dwarf assumes cfa as FP based
		 * but we didn't save FP. The value retrieved above is FP's
		 * state in previous frame.
		 * As a work around for this, we unwind from __switch_to start
		 * and adjust SP accordingly. The other limitation is that
		 * __switch_to macro is dwarf rules are not generated for inline
		 * assembly code
		 */
		frame_info->regs.r27 = 0;
		frame_info->regs.r28 += 60;	/* step past __switch_to's frame (see workaround above) */
		frame_info->call_frame = 0;

	} else {
		/*
		 * Asynchronous unwinding of intr/exception
		 *  - Just uses the pt_regs passed
		 */
		frame_info->task = tsk;

		frame_info->regs.r27 = regs->fp;
		frame_info->regs.r28 = regs->sp;
		frame_info->regs.r31 = regs->blink;
		frame_info->regs.r63 = regs->ret;
		frame_info->call_frame = 0;
	}
}
107
108#endif
109
110notrace noinline unsigned int
111arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
112		int (*consumer_fn) (unsigned int, void *), void *arg)
113{
114#ifdef CONFIG_ARC_DW2_UNWIND
115	int ret = 0;
116	unsigned int address;
117	struct unwind_frame_info frame_info;
118
119	seed_unwind_frame_info(tsk, regs, &frame_info);
 
120
121	while (1) {
122		address = UNW_PC(&frame_info);
123
124		if (!address || !__kernel_text_address(address))
125			break;
126
127		if (consumer_fn(address, arg) == -1)
128			break;
129
130		ret = arc_unwind(&frame_info);
131		if (ret)
132			break;
133
134		frame_info.regs.r63 = frame_info.regs.r31;
 
 
 
 
 
135	}
136
137	return address;		/* return the last address it saw */
138#else
139	/* On ARC, only Dward based unwinder works. fp based backtracing is
140	 * not possible (-fno-omit-frame-pointer) because of the way function
141	 * prelogue is setup (callee regs saved and then fp set and not other
142	 * way around
143	 */
144	pr_warn_once("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
145	return 0;
146
147#endif
148}
149
150/*-------------------------------------------------------------------------
151 * callbacks called by unwinder iterator to implement kernel APIs
152 *
153 * The callback can return -1 to force the iterator to stop, which by default
154 * keeps going till the bottom-most frame.
155 *-------------------------------------------------------------------------
156 */
157
/*
 * Unwinder callback used when dumping a stack to the console
 * (panic / OOPs / BUG): emit one symbolized address per frame.
 */
static int __print_sym(unsigned int address, void *arg)
{
	printk("%s  %pS\n", (const char *)arg, (void *)address);
	return 0;	/* never stop the walk early */
}
168
169#ifdef CONFIG_STACKTRACE
170
171/* Call-back which plugs into unwinding core to capture the
172 * traces needed by kernel on /proc/<pid>/stack
173 */
174static int __collect_all(unsigned int address, void *arg)
175{
176	struct stack_trace *trace = arg;
177
178	if (trace->skip > 0)
179		trace->skip--;
180	else
181		trace->entries[trace->nr_entries++] = address;
182
183	if (trace->nr_entries >= trace->max_entries)
184		return -1;
185
186	return 0;
187}
188
189static int __collect_all_but_sched(unsigned int address, void *arg)
190{
191	struct stack_trace *trace = arg;
192
193	if (in_sched_functions(address))
194		return 0;
195
196	if (trace->skip > 0)
197		trace->skip--;
198	else
199		trace->entries[trace->nr_entries++] = address;
200
201	if (trace->nr_entries >= trace->max_entries)
202		return -1;
203
204	return 0;
205}
206
207#endif
208
/*
 * Unwinder callback for get_wchan(): keep walking (return 0) while still
 * inside scheduler code; halt (-1) at the first frame outside it, so that
 * frame's address becomes arc_unwind_core()'s return value.
 */
static int __get_first_nonsched(unsigned int address, void *unused)
{
	return in_sched_functions(address) ? 0 : -1;
}
216
217/*-------------------------------------------------------------------------
218 *              APIs expected by various kernel sub-systems
219 *-------------------------------------------------------------------------
220 */
221
/*
 * Dump the kernel-mode call stack of @tsk (or of the context in @regs) to
 * the console at log level @loglvl, one "%pS" symbolized line per frame.
 */
noinline void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs,
			      const char *loglvl)
{
	printk("%s\nStack Trace:\n", loglvl);
	arc_unwind_core(tsk, regs, __print_sym, (void *)loglvl);
}
EXPORT_SYMBOL(show_stacktrace);
229
/* Expected by sched Code */
void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
	/* @sp is ignored: the unwinder seeds itself from @tsk / current */
	show_stacktrace(tsk, NULL, loglvl);
}
235
/* Another API expected by the scheduler; shows up in "ps" as the Wait Channel.
 * Just returning schedule() would be pointless, so keep unwinding until the
 * first function that is not in scheduler code.
 *
 * NOTE(review): nothing checks that @tsk is actually sleeping here; unwinding
 * a task running on another CPU uses stale saved state — later kernels bail
 * out via task_is_running() in the seeding path.
 */
unsigned int get_wchan(struct task_struct *tsk)
{
	return arc_unwind_core(tsk, NULL, __get_first_nonsched, NULL);
}
244
245#ifdef CONFIG_STACKTRACE
246
/*
 * API required by CONFIG_STACKTRACE, CONFIG_LATENCYTOP.
 * A typical use is when /proc/<pid>/stack is queried by userland:
 * fills @trace with @tsk's kernel-mode call chain, minus scheduler frames.
 */
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	/* Assumes @tsk is sleeping so unwinds from __switch_to */
	arc_unwind_core(tsk, NULL, __collect_all_but_sched, trace);
}
256
/* Capture the current call chain into @trace (CONFIG_STACKTRACE API) */
void save_stack_trace(struct stack_trace *trace)
{
	/* Pass NULL for task so it unwinds the current call frame */
	arc_unwind_core(NULL, NULL, __collect_all, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
263#endif
/* Source listing: arch/arc/kernel/stacktrace.c — Linux v5.14.15 */
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 *	stacktrace.c : stacktracing APIs needed by rest of kernel
  4 *			(wrappers over ARC dwarf based unwinder)
  5 *
  6 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
  7 *
  8 *  vineetg: aug 2009
  9 *  -Implemented CONFIG_STACKTRACE APIs, primarily save_stack_trace_tsk( )
 10 *   for displaying task's kernel mode call stack in /proc/<pid>/stack
 11 *  -Iterator based approach to have single copy of unwinding core and APIs
 12 *   needing unwinding, implement the logic in iterator regarding:
 13 *      = which frame onwards to start capture
 14 *      = which frame to stop capturing (wchan)
 15 *      = specifics of data structs where trace is saved(CONFIG_STACKTRACE etc)
 16 *
 17 *  vineetg: March 2009
 18 *  -Implemented correct versions of thread_saved_pc() and get_wchan()
 19 *
 20 *  rajeshwarr: 2008
 21 *  -Initial implementation
 22 */
 23
 24#include <linux/ptrace.h>
 25#include <linux/export.h>
 26#include <linux/stacktrace.h>
 27#include <linux/kallsyms.h>
 28#include <linux/sched/debug.h>
 29
 30#include <asm/arcregs.h>
 31#include <asm/unwind.h>
 32#include <asm/switch_to.h>
 33
 34/*-------------------------------------------------------------------------
 35 *              Unwinder Iterator
 36 *-------------------------------------------------------------------------
 37 */
 38
 39#ifdef CONFIG_ARC_DW2_UNWIND
 40
/*
 * Seed the dwarf unwinder with a starting register state, chosen by what the
 * caller wants unwound:
 *   @regs != NULL             ->  unwind from the intr/exception @regs
 *   @tsk NULL or current      ->  synchronous unwind of the current chain
 *   otherwise                 ->  unwind a sleeping task from __switch_to
 *
 * Returns 0 on success, -1 when no trustworthy seed exists (@tsk is running
 * on another CPU, so its saved register state would be stale).
 */
static int
seed_unwind_frame_info(struct task_struct *tsk, struct pt_regs *regs,
		       struct unwind_frame_info *frame_info)
{
	if (regs) {
		/*
		 * Asynchronous unwinding of intr/exception
		 *  - Just uses the pt_regs passed
		 */
		frame_info->task = tsk;

		frame_info->regs.r27 = regs->fp;
		frame_info->regs.r28 = regs->sp;
		frame_info->regs.r31 = regs->blink;
		frame_info->regs.r63 = regs->ret;
		frame_info->call_frame = 0;
	} else if (tsk == NULL || tsk == current) {
		/*
		 * synchronous unwinding (e.g. dump_stack)
		 *  - uses current values of SP and friends
		 */
		unsigned long fp, sp, blink, ret;
		frame_info->task = current;

		/* ARC core regs: r27 = FP, r28 = SP, r31 = BLINK (return
		 * address); r63 presumably reads the current PC (PCL) —
		 * verify against the ARC ISA reference.
		 */
		__asm__ __volatile__(
			"mov %0,r27\n\t"
			"mov %1,r28\n\t"
			"mov %2,r31\n\t"
			"mov %3,r63\n\t"
			: "=r"(fp), "=r"(sp), "=r"(blink), "=r"(ret)
		);

		frame_info->regs.r27 = fp;
		frame_info->regs.r28 = sp;
		frame_info->regs.r31 = blink;
		frame_info->regs.r63 = ret;
		frame_info->call_frame = 0;
	} else {
		/*
		 * Asynchronous unwinding of a likely sleeping task
		 *  - first ensure it is actually sleeping
		 *  - if so, it will be in __switch_to, kernel mode SP of task
		 *    is safe-kept and BLINK at a well known location in there
		 */

		if (task_is_running(tsk))
			return -1;

		frame_info->task = tsk;

		frame_info->regs.r27 = TSK_K_FP(tsk);
		frame_info->regs.r28 = TSK_K_ESP(tsk);
		frame_info->regs.r31 = TSK_K_BLINK(tsk);
		frame_info->regs.r63 = (unsigned int)__switch_to;

		/* In the prologue of __switch_to, first FP is saved on stack
		 * and then SP is copied to FP. Dwarf assumes cfa as FP based
		 * but we didn't save FP. The value retrieved above is FP's
		 * state in previous frame.
		 * As a work around for this, we unwind from __switch_to start
		 * and adjust SP accordingly. The other limitation is that
		 * __switch_to macro is dwarf rules are not generated for inline
		 * assembly code
		 */
		frame_info->regs.r27 = 0;
		frame_info->regs.r28 += 60;	/* step past __switch_to's frame (see workaround above) */
		frame_info->call_frame = 0;
	}
	return 0;
}
112
113#endif
114
/*
 * Core unwinder iterator: seeds a register state from (@tsk, @regs) via
 * seed_unwind_frame_info() and walks the call chain, handing each PC to
 * @consumer_fn(pc, @arg).  The walk stops when the consumer returns -1,
 * when the PC leaves kernel text, or when the dwarf unwinder errors out.
 *
 * Returns the last address seen (0 if seeding failed or the walk aborted).
 */
notrace noinline unsigned int
arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
		int (*consumer_fn) (unsigned int, void *), void *arg)
{
#ifdef CONFIG_ARC_DW2_UNWIND
	int ret = 0, cnt = 0;
	unsigned int address;
	struct unwind_frame_info frame_info;

	/* bail if no trustworthy starting state (e.g. task is running) */
	if (seed_unwind_frame_info(tsk, regs, &frame_info))
		return 0;

	while (1) {
		address = UNW_PC(&frame_info);

		if (!address || !__kernel_text_address(address))
			break;

		if (consumer_fn(address, arg) == -1)
			break;

		ret = arc_unwind(&frame_info);
		if (ret)
			break;

		/* seed next lookup's PC from the just-unwound return address */
		frame_info.regs.r63 = frame_info.regs.r31;

		/* guard against corrupt/cyclic dwarf info looping forever */
		if (cnt++ > 128) {
			printk("unwinder looping too long, aborting !\n");
			return 0;
		}
	}

	return address;		/* return the last address it saw */
#else
	/* On ARC, only the DWARF based unwinder works. fp based backtracing
	 * is not possible (-fno-omit-frame-pointer) because of the way the
	 * function prologue is setup (callee regs saved and then fp set, not
	 * the other way around)
	 */
	pr_warn_once("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
	return 0;

#endif
}
160
161/*-------------------------------------------------------------------------
162 * callbacks called by unwinder iterator to implement kernel APIs
163 *
164 * The callback can return -1 to force the iterator to stop, which by default
165 * keeps going till the bottom-most frame.
166 *-------------------------------------------------------------------------
167 */
168
/*
 * Unwinder callback used when dumping a stack to the console
 * (panic / OOPs / BUG): emit one symbolized address per frame.
 */
static int __print_sym(unsigned int address, void *arg)
{
	printk("%s  %pS\n", (const char *)arg, (void *)address);
	return 0;	/* never stop the walk early */
}
179
180#ifdef CONFIG_STACKTRACE
181
182/* Call-back which plugs into unwinding core to capture the
183 * traces needed by kernel on /proc/<pid>/stack
184 */
185static int __collect_all(unsigned int address, void *arg)
186{
187	struct stack_trace *trace = arg;
188
189	if (trace->skip > 0)
190		trace->skip--;
191	else
192		trace->entries[trace->nr_entries++] = address;
193
194	if (trace->nr_entries >= trace->max_entries)
195		return -1;
196
197	return 0;
198}
199
200static int __collect_all_but_sched(unsigned int address, void *arg)
201{
202	struct stack_trace *trace = arg;
203
204	if (in_sched_functions(address))
205		return 0;
206
207	if (trace->skip > 0)
208		trace->skip--;
209	else
210		trace->entries[trace->nr_entries++] = address;
211
212	if (trace->nr_entries >= trace->max_entries)
213		return -1;
214
215	return 0;
216}
217
218#endif
219
/*
 * Unwinder callback for get_wchan(): keep walking (return 0) while still
 * inside scheduler code; halt (-1) at the first frame outside it, so that
 * frame's address becomes arc_unwind_core()'s return value.
 */
static int __get_first_nonsched(unsigned int address, void *unused)
{
	return in_sched_functions(address) ? 0 : -1;
}
227
228/*-------------------------------------------------------------------------
229 *              APIs expected by various kernel sub-systems
230 *-------------------------------------------------------------------------
231 */
232
/*
 * Dump the kernel-mode call stack of @tsk (or of the context in @regs) to
 * the console at log level @loglvl, one "%pS" symbolized line per frame.
 */
noinline void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs,
			      const char *loglvl)
{
	printk("%s\nStack Trace:\n", loglvl);
	arc_unwind_core(tsk, regs, __print_sym, (void *)loglvl);
}
EXPORT_SYMBOL(show_stacktrace);
240
/* Expected by sched Code */
void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
	/* @sp is ignored: the unwinder seeds itself from @tsk / current */
	show_stacktrace(tsk, NULL, loglvl);
}
246
/* Another API expected by the scheduler; shows up in "ps" as the Wait Channel.
 * Just returning schedule() would be pointless, so keep unwinding until the
 * first function that is not in scheduler code.
 * (A running @tsk is rejected in seed_unwind_frame_info(), yielding 0.)
 */
unsigned int get_wchan(struct task_struct *tsk)
{
	return arc_unwind_core(tsk, NULL, __get_first_nonsched, NULL);
}
255
256#ifdef CONFIG_STACKTRACE
257
/*
 * API required by CONFIG_STACKTRACE, CONFIG_LATENCYTOP.
 * A typical use is when /proc/<pid>/stack is queried by userland:
 * fills @trace with @tsk's kernel-mode call chain, minus scheduler frames.
 */
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	/* Assumes @tsk is sleeping so unwinds from __switch_to */
	arc_unwind_core(tsk, NULL, __collect_all_but_sched, trace);
}
267
/* Capture the current call chain into @trace (CONFIG_STACKTRACE API) */
void save_stack_trace(struct stack_trace *trace)
{
	/* Pass NULL for task so it unwinds the current call frame */
	arc_unwind_core(NULL, NULL, __collect_all, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
274#endif