v6.13.7
// SPDX-License-Identifier: GPL-2.0-only
/*
 *	stacktrace.c : stacktracing APIs needed by rest of kernel
 *			(wrappers over ARC dwarf based unwinder)
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 *  vineetg: aug 2009
 *  -Implemented CONFIG_STACKTRACE APIs, primarily save_stack_trace_tsk()
 *   for displaying a task's kernel mode call stack in /proc/<pid>/stack
 *  -Iterator based approach to have a single copy of the unwinding core;
 *   APIs needing unwinding implement the logic in the iterator regarding:
 *      = which frame onwards to start capture
 *      = which frame to stop capturing at (wchan)
 *      = specifics of data structs where trace is saved (CONFIG_STACKTRACE etc.)
 *
 *  vineetg: March 2009
 *  -Implemented correct versions of thread_saved_pc() and __get_wchan()
 *
 *  rajeshwarr: 2008
 *  -Initial implementation
 */

#include <linux/ptrace.h>
#include <linux/export.h>
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/sched/debug.h>

#include <asm/arcregs.h>
#include <asm/unwind.h>
#include <asm/stacktrace.h>
#include <asm/switch_to.h>

/*-------------------------------------------------------------------------
 *              Unwinder Iterator
 *-------------------------------------------------------------------------
 */

#ifdef CONFIG_ARC_DW2_UNWIND

static int
seed_unwind_frame_info(struct task_struct *tsk, struct pt_regs *regs,
		       struct unwind_frame_info *frame_info)
{
	if (regs) {
		/*
		 * Asynchronous unwinding of intr/exception
		 *  - Just uses the pt_regs passed
		 */
		frame_info->task = tsk;

		frame_info->regs.r27 = regs->fp;
		frame_info->regs.r28 = regs->sp;
		frame_info->regs.r31 = regs->blink;
		frame_info->regs.r63 = regs->ret;
		frame_info->call_frame = 0;
	} else if (tsk == NULL || tsk == current) {
		/*
		 * Synchronous unwinding (e.g. dump_stack)
		 *  - uses current values of SP and friends
		 */
		unsigned long fp, sp, blink, ret;

		frame_info->task = current;

		__asm__ __volatile__(
			"mov %0,r27\n\t"
			"mov %1,r28\n\t"
			"mov %2,r31\n\t"
			"mov %3,r63\n\t"
			: "=r"(fp), "=r"(sp), "=r"(blink), "=r"(ret)
		);
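		/* r27 = FP, r28 = SP, r31 = BLINK, r63 = PCL (current PC) */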

		frame_info->regs.r27 = fp;
		frame_info->regs.r28 = sp;
		frame_info->regs.r31 = blink;
		frame_info->regs.r63 = ret;
		frame_info->call_frame = 0;
	} else {
		/*
		 * Asynchronous unwinding of a likely sleeping task
		 *  - first ensure it is actually sleeping
		 *  - if so, it will be in __switch_to; the task's kernel mode
		 *    SP is safe-kept and BLINK is at a well-known location
		 *    in there
		 */

		if (task_is_running(tsk))
			return -1;

		frame_info->task = tsk;

		frame_info->regs.r27 = TSK_K_FP(tsk);
		frame_info->regs.r28 = TSK_K_ESP(tsk);
		frame_info->regs.r31 = TSK_K_BLINK(tsk);
		frame_info->regs.r63 = (unsigned int)__switch_to;

		/* In the prologue of __switch_to, FP is first saved on the
		 * stack and then SP is copied to FP. Dwarf assumes the CFA is
		 * FP based, but we never saved FP: the value retrieved above
		 * is FP's state in the previous frame.
		 * As a workaround, we unwind from the start of __switch_to
		 * and adjust SP accordingly. The other limitation is that
		 * dwarf rules are not generated for the inline assembly in
		 * the __switch_to code.
		 */
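		/*
		 * Editorial note (hedged): the 60-byte SP adjustment below is
		 * understood to skip the register save area pushed by
		 * __switch_to's prologue; the exact layout is defined by the
		 * ARC entry code, not here.
		 */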
		frame_info->regs.r27 = 0;
		frame_info->regs.r28 += 60;
		frame_info->call_frame = 0;
	}

	return 0;
}

#endif

notrace noinline unsigned int
arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
		int (*consumer_fn) (unsigned int, void *), void *arg)
{
#ifdef CONFIG_ARC_DW2_UNWIND
	int ret = 0, cnt = 0;
	unsigned int address;
	struct unwind_frame_info frame_info;

	if (seed_unwind_frame_info(tsk, regs, &frame_info))
		return 0;

	while (1) {
		address = UNW_PC(&frame_info);

		if (!address || !__kernel_text_address(address))
			break;

		if (consumer_fn(address, arg) == -1)
			break;

		ret = arc_unwind(&frame_info);
		if (ret)
			break;

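		/*
		 * arc_unwind() recovered the caller's state; seed the next
		 * iteration's PC (r63) from the restored return address
		 * (BLINK, i.e. r31).
		 */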
		frame_info.regs.r63 = frame_info.regs.r31;

		if (cnt++ > 128) {
			printk("unwinder looping too long, aborting!\n");
			return 0;
		}
	}

	return address;		/* return the last address it saw */
#else
	/* On ARC, only the DWARF based unwinder works. FP based backtracing
	 * is not possible (even with -fno-omit-frame-pointer) because of the
	 * way the function prologue is set up (callee regs saved first, then
	 * FP set, not the other way around).
	 */
	pr_warn_once("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
	return 0;

#endif
}
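
/*
 * Illustrative sketch (editorial, not part of the original file): how a
 * consumer plugs into arc_unwind_core(). The callback and variable names
 * here are invented for the example; the real consumers are the callbacks
 * below.
 *
 *	static int count_frame(unsigned int address, void *arg)
 *	{
 *		unsigned int *cnt = arg;
 *
 *		(*cnt)++;
 *		return 0;	// return -1 to stop the walk early
 *	}
 *
 *	unsigned int nr = 0;
 *	arc_unwind_core(NULL, NULL, count_frame, &nr);	// walk current stack
 */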

/*-------------------------------------------------------------------------
 * Callbacks invoked by the unwinder iterator to implement kernel APIs
 *
 * A callback can return -1 to force the iterator to stop; by default it
 * keeps going till the bottom-most frame.
 *-------------------------------------------------------------------------
 */

/* Callback which plugs into the unwinding core to dump the stack in
 * case of panic/Oops/BUG etc.
 */
static int __print_sym(unsigned int address, void *arg)
{
	const char *loglvl = arg;

	printk("%s  %pS\n", loglvl, (void *)address);
	return 0;
}

#ifdef CONFIG_STACKTRACE

/* Callback which plugs into the unwinding core to capture the
 * traces needed by the kernel for /proc/<pid>/stack
 */
static int __collect_all(unsigned int address, void *arg)
{
	struct stack_trace *trace = arg;

	if (trace->skip > 0)
		trace->skip--;
	else
		trace->entries[trace->nr_entries++] = address;

	if (trace->nr_entries >= trace->max_entries)
		return -1;

	return 0;
}

static int __collect_all_but_sched(unsigned int address, void *arg)
{
	struct stack_trace *trace = arg;

	if (in_sched_functions(address))
		return 0;

	if (trace->skip > 0)
		trace->skip--;
	else
		trace->entries[trace->nr_entries++] = address;

	if (trace->nr_entries >= trace->max_entries)
		return -1;

	return 0;
}

#endif

static int __get_first_nonsched(unsigned int address, void *unused)
{
	if (in_sched_functions(address))
		return 0;

	return -1;
}

/*-------------------------------------------------------------------------
 *              APIs expected by various kernel sub-systems
 *-------------------------------------------------------------------------
 */

noinline void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs,
			      const char *loglvl)
{
	printk("%s\nStack Trace:\n", loglvl);
	arc_unwind_core(tsk, regs, __print_sym, (void *)loglvl);
}
EXPORT_SYMBOL(show_stacktrace);

/* Expected by sched code */
void show_stack(struct task_struct *tsk, unsigned long *sp, const char *loglvl)
{
	show_stacktrace(tsk, NULL, loglvl);
}

/* Another API expected by the scheduler; shows up in "ps" as the Wait
 * Channel. Of course just returning schedule() would be pointless, so
 * unwind until the first function outside scheduler code.
 */
unsigned int __get_wchan(struct task_struct *tsk)
{
	return arc_unwind_core(tsk, NULL, __get_first_nonsched, NULL);
}

#ifdef CONFIG_STACKTRACE

/*
 * API required by CONFIG_STACKTRACE, CONFIG_LATENCYTOP.
 * A typical use is when /proc/<pid>/stack is queried by userland
 */
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	/* Assumes @tsk is sleeping so unwinds from __switch_to */
	arc_unwind_core(tsk, NULL, __collect_all_but_sched, trace);
}

void save_stack_trace(struct stack_trace *trace)
{
	/* Pass NULL for task so it unwinds the current call frame */
	arc_unwind_core(NULL, NULL, __collect_all, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
#endif
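
/*
 * Usage sketch (editorial, hedged): the legacy struct stack_trace API is
 * typically driven roughly as below; the buffer size is arbitrary for the
 * example.
 *
 *	unsigned long entries[16];
 *	struct stack_trace trace = {
 *		.entries	= entries,
 *		.max_entries	= ARRAY_SIZE(entries),
 *		.skip		= 0,
 *	};
 *
 *	save_stack_trace(&trace);
 *	stack_trace_print(trace.entries, trace.nr_entries, 0);
 */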