[Site header, not part of the source: Linux Audio navigation and an advertisement for a "Real-Time Linux with PREEMPT_RT" training course, Feb 18–20, 2025.]
Listing 1 — LoongArch stack trace code, kernel v6.8:
 1// SPDX-License-Identifier: GPL-2.0
 2/*
 3 * Stack trace management functions
 4 *
 5 * Copyright (C) 2022 Loongson Technology Corporation Limited
 6 */
 7#include <linux/sched.h>
 8#include <linux/stacktrace.h>
 9#include <linux/uaccess.h>
10
11#include <asm/stacktrace.h>
12#include <asm/unwind.h>
13
14void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
15		     struct task_struct *task, struct pt_regs *regs)
16{
17	unsigned long addr;
18	struct pt_regs dummyregs;
19	struct unwind_state state;
20
21	if (!regs) {
22		regs = &dummyregs;
23
24		if (task == current) {
25			regs->regs[3] = (unsigned long)__builtin_frame_address(0);
26			regs->csr_era = (unsigned long)__builtin_return_address(0);
27		} else {
28			regs->regs[3] = thread_saved_fp(task);
29			regs->csr_era = thread_saved_ra(task);
30		}
31		regs->regs[1] = 0;
 
32	}
33
34	for (unwind_start(&state, task, regs);
35	     !unwind_done(&state); unwind_next_frame(&state)) {
36		addr = unwind_get_return_address(&state);
37		if (!addr || !consume_entry(cookie, addr))
38			break;
39	}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
40}
41
42static int
43copy_stack_frame(unsigned long fp, struct stack_frame *frame)
44{
45	int ret = 1;
46	unsigned long err;
47	unsigned long __user *user_frame_tail;
48
49	user_frame_tail = (unsigned long *)(fp - sizeof(struct stack_frame));
50	if (!access_ok(user_frame_tail, sizeof(*frame)))
51		return 0;
52
53	pagefault_disable();
54	err = (__copy_from_user_inatomic(frame, user_frame_tail, sizeof(*frame)));
55	if (err || (unsigned long)user_frame_tail >= frame->fp)
56		ret = 0;
57	pagefault_enable();
58
59	return ret;
60}
61
62void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
63			  const struct pt_regs *regs)
64{
65	unsigned long fp = regs->regs[22];
66
67	while (fp && !((unsigned long)fp & 0xf)) {
68		struct stack_frame frame;
69
70		frame.fp = 0;
71		frame.ra = 0;
72		if (!copy_stack_frame(fp, &frame))
73			break;
74		if (!frame.ra)
75			break;
76		if (!consume_entry(cookie, frame.ra))
77			break;
78		fp = frame.fp;
79	}
80}
Listing 2 — LoongArch stack trace code, kernel v6.9.4:
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Stack trace management functions
  4 *
  5 * Copyright (C) 2022 Loongson Technology Corporation Limited
  6 */
  7#include <linux/sched.h>
  8#include <linux/stacktrace.h>
  9#include <linux/uaccess.h>
 10
 11#include <asm/stacktrace.h>
 12#include <asm/unwind.h>
 13
 14void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
 15		     struct task_struct *task, struct pt_regs *regs)
 16{
 17	unsigned long addr;
 18	struct pt_regs dummyregs;
 19	struct unwind_state state;
 20
 21	if (!regs) {
 22		regs = &dummyregs;
 23
 24		if (task == current) {
 25			regs->regs[3] = (unsigned long)__builtin_frame_address(0);
 26			regs->csr_era = (unsigned long)__builtin_return_address(0);
 27		} else {
 28			regs->regs[3] = thread_saved_fp(task);
 29			regs->csr_era = thread_saved_ra(task);
 30		}
 31		regs->regs[1] = 0;
 32		regs->regs[22] = 0;
 33	}
 34
 35	for (unwind_start(&state, task, regs);
 36	     !unwind_done(&state); unwind_next_frame(&state)) {
 37		addr = unwind_get_return_address(&state);
 38		if (!addr || !consume_entry(cookie, addr))
 39			break;
 40	}
 41}
 42
 43int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
 44			     void *cookie, struct task_struct *task)
 45{
 46	unsigned long addr;
 47	struct pt_regs dummyregs;
 48	struct pt_regs *regs = &dummyregs;
 49	struct unwind_state state;
 50
 51	if (task == current) {
 52		regs->regs[3] = (unsigned long)__builtin_frame_address(0);
 53		regs->csr_era = (unsigned long)__builtin_return_address(0);
 54	} else {
 55		regs->regs[3] = thread_saved_fp(task);
 56		regs->csr_era = thread_saved_ra(task);
 57	}
 58	regs->regs[1] = 0;
 59	regs->regs[22] = 0;
 60
 61	for (unwind_start(&state, task, regs);
 62	     !unwind_done(&state) && !unwind_error(&state); unwind_next_frame(&state)) {
 63		addr = unwind_get_return_address(&state);
 64
 65		/*
 66		 * A NULL or invalid return address probably means there's some
 67		 * generated code which __kernel_text_address() doesn't know about.
 68		 */
 69		if (!addr)
 70			return -EINVAL;
 71
 72		if (!consume_entry(cookie, addr))
 73			return -EINVAL;
 74	}
 75
 76	/* Check for stack corruption */
 77	if (unwind_error(&state))
 78		return -EINVAL;
 79
 80	return 0;
 81}
 82
 83static int
 84copy_stack_frame(unsigned long fp, struct stack_frame *frame)
 85{
 86	int ret = 1;
 87	unsigned long err;
 88	unsigned long __user *user_frame_tail;
 89
 90	user_frame_tail = (unsigned long *)(fp - sizeof(struct stack_frame));
 91	if (!access_ok(user_frame_tail, sizeof(*frame)))
 92		return 0;
 93
 94	pagefault_disable();
 95	err = (__copy_from_user_inatomic(frame, user_frame_tail, sizeof(*frame)));
 96	if (err || (unsigned long)user_frame_tail >= frame->fp)
 97		ret = 0;
 98	pagefault_enable();
 99
100	return ret;
101}
102
103void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
104			  const struct pt_regs *regs)
105{
106	unsigned long fp = regs->regs[22];
107
108	while (fp && !((unsigned long)fp & 0xf)) {
109		struct stack_frame frame;
110
111		frame.fp = 0;
112		frame.ra = 0;
113		if (!copy_stack_frame(fp, &frame))
114			break;
115		if (!frame.ra)
116			break;
117		if (!consume_entry(cookie, frame.ra))
118			break;
119		fp = frame.fp;
120	}
121}