v6.13.7
// SPDX-License-Identifier: GPL-2.0
/*
 * Stack trace management functions
 *
 *  Copyright IBM Corp. 2006
 */

#include <linux/perf_event.h>
#include <linux/stacktrace.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>
#include <asm/kprobes.h>
#include <asm/ptrace.h>

void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
		     struct task_struct *task, struct pt_regs *regs)
{
	struct unwind_state state;
	unsigned long addr;

	unwind_for_each_frame(&state, task, regs, 0) {
		addr = unwind_get_return_address(&state);
		if (!addr || !consume_entry(cookie, addr))
			break;
	}
}

int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
			     void *cookie, struct task_struct *task)
{
	struct unwind_state state;
	unsigned long addr;

	unwind_for_each_frame(&state, task, NULL, 0) {
		if (state.stack_info.type != STACK_TYPE_TASK)
			return -EINVAL;

		if (state.regs)
			return -EINVAL;

		addr = unwind_get_return_address(&state);
		if (!addr)
			return -EINVAL;

#ifdef CONFIG_RETHOOK
		/*
		 * Mark stacktraces with krethook functions on them
		 * as unreliable.
		 */
		if (state.ip == (unsigned long)arch_rethook_trampoline)
			return -EINVAL;
#endif

		if (!consume_entry(cookie, addr))
			return -EINVAL;
	}

	/* Check for stack corruption */
	if (unwind_error(&state))
		return -EINVAL;
	return 0;
}

static inline bool store_ip(stack_trace_consume_fn consume_entry, void *cookie,
			    struct perf_callchain_entry_ctx *entry, bool perf,
			    unsigned long ip)
{
#ifdef CONFIG_PERF_EVENTS
	if (perf) {
		if (perf_callchain_store(entry, ip))
			return false;
		return true;
	}
#endif
	return consume_entry(cookie, ip);
}

static inline bool ip_invalid(unsigned long ip)
{
	/*
	 * Perform some basic checks to determine whether an instruction
	 * address taken from an unreliable source is invalid.
	 */
	if (ip & 1)
		return true;
	if (ip < mmap_min_addr)
		return true;
	if (ip >= current->mm->context.asce_limit)
		return true;
	return false;
}

static inline bool ip_within_vdso(unsigned long ip)
{
	return in_range(ip, current->mm->context.vdso_base, vdso_text_size());
}

void arch_stack_walk_user_common(stack_trace_consume_fn consume_entry, void *cookie,
				 struct perf_callchain_entry_ctx *entry,
				 const struct pt_regs *regs, bool perf)
{
	struct stack_frame_vdso_wrapper __user *sf_vdso;
	struct stack_frame_user __user *sf;
	unsigned long ip, sp;
	bool first = true;

	if (is_compat_task())
		return;
	if (!current->mm)
		return;
	ip = instruction_pointer(regs);
	if (!store_ip(consume_entry, cookie, entry, perf, ip))
		return;
	sf = (void __user *)user_stack_pointer(regs);
	pagefault_disable();
	while (1) {
		if (__get_user(sp, &sf->back_chain))
			break;
		/*
		 * VDSO entry code has a non-standard stack frame layout.
		 * See VDSO user wrapper code for details.
		 */
		if (!sp && ip_within_vdso(ip)) {
			sf_vdso = (void __user *)sf;
			if (__get_user(ip, &sf_vdso->return_address))
				break;
			sp = (unsigned long)sf + STACK_FRAME_VDSO_OVERHEAD;
			sf = (void __user *)sp;
			if (__get_user(sp, &sf->back_chain))
				break;
		} else {
			sf = (void __user *)sp;
			if (__get_user(ip, &sf->gprs[8]))
				break;
		}
		/* Sanity check: ABI requires SP to be 8 byte aligned. */
		if (sp & 0x7)
			break;
		if (ip_invalid(ip)) {
			/*
			 * If the instruction address is invalid, and this
			 * is the first stack frame, assume r14 has not
			 * been written to the stack yet. Otherwise exit.
			 */
			if (!first)
				break;
			ip = regs->gprs[14];
			if (ip_invalid(ip))
				break;
		}
		if (!store_ip(consume_entry, cookie, entry, perf, ip))
			break;
		first = false;
	}
	pagefault_enable();
}

void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
			  const struct pt_regs *regs)
{
	arch_stack_walk_user_common(consume_entry, cookie, NULL, regs, false);
}
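
These functions are the s390 back end for the kernel's generic stacktrace API. Callers normally do not invoke arch_stack_walk() directly; they go through wrappers such as stack_trace_save() in kernel/stacktrace.c, which pass in a stack_trace_consume_fn callback plus an opaque cookie, and the walk stops as soon as the callback returns false. The sketch below illustrates that contract; struct trace_buf and collect_entry() are made-up names for this example, not part of the kernel.

/*
 * Illustrative sketch only: a minimal consume_entry callback that
 * copies return addresses into a fixed-size buffer, in the style of
 * the generic helpers in kernel/stacktrace.c.
 */
struct trace_buf {
	unsigned long *store;	/* output array of return addresses */
	unsigned int size;	/* capacity of the array */
	unsigned int len;	/* entries collected so far */
};

static bool collect_entry(void *cookie, unsigned long addr)
{
	struct trace_buf *buf = cookie;

	if (buf->len >= buf->size)
		return false;	/* buffer full: stop the walk */
	buf->store[buf->len++] = addr;
	return true;		/* keep walking */
}

/*
 * A caller could then walk the current task roughly like this:
 *
 *	unsigned long entries[16];
 *	struct trace_buf buf = { .store = entries, .size = ARRAY_SIZE(entries) };
 *
 *	arch_stack_walk(collect_entry, &buf, current, NULL);
 */

The perf parameter of arch_stack_walk_user_common() lets the perf subsystem reuse the same user-space walker: with perf set, store_ip() records entries via perf_callchain_store() instead of calling the callback, which is how perf_callchain_user() on s390 feeds user call chains to perf.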
v6.8
// SPDX-License-Identifier: GPL-2.0
/*
 * Stack trace management functions
 *
 *  Copyright IBM Corp. 2006
 */

#include <linux/stacktrace.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>
#include <asm/kprobes.h>
#include <asm/ptrace.h>

void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
		     struct task_struct *task, struct pt_regs *regs)
{
	struct unwind_state state;
	unsigned long addr;

	unwind_for_each_frame(&state, task, regs, 0) {
		addr = unwind_get_return_address(&state);
		if (!addr || !consume_entry(cookie, addr))
			break;
	}
}

int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
			     void *cookie, struct task_struct *task)
{
	struct unwind_state state;
	unsigned long addr;

	unwind_for_each_frame(&state, task, NULL, 0) {
		if (state.stack_info.type != STACK_TYPE_TASK)
			return -EINVAL;

		if (state.regs)
			return -EINVAL;

		addr = unwind_get_return_address(&state);
		if (!addr)
			return -EINVAL;

#ifdef CONFIG_RETHOOK
		/*
		 * Mark stacktraces with krethook functions on them
		 * as unreliable.
		 */
		if (state.ip == (unsigned long)arch_rethook_trampoline)
			return -EINVAL;
#endif

		if (!consume_entry(cookie, addr))
			return -EINVAL;
	}

	/* Check for stack corruption */
	if (unwind_error(&state))
		return -EINVAL;
	return 0;
}

void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
			  const struct pt_regs *regs)
{
	struct stack_frame_user __user *sf;
	unsigned long ip, sp;
	bool first = true;

	if (is_compat_task())
		return;
	if (!consume_entry(cookie, instruction_pointer(regs)))
		return;
	sf = (void __user *)user_stack_pointer(regs);
	pagefault_disable();
	while (1) {
		if (__get_user(sp, &sf->back_chain))
			break;
		if (__get_user(ip, &sf->gprs[8]))
			break;
		if (ip & 0x1) {
			/*
			 * If the instruction address is invalid, and this
			 * is the first stack frame, assume r14 has not
			 * been written to the stack yet. Otherwise exit.
			 */
			if (first && !(regs->gprs[14] & 0x1))
				ip = regs->gprs[14];
			else
				break;
		}
		if (!consume_entry(cookie, ip))
			break;
		/* Sanity check: ABI requires SP to be 8 byte aligned. */
		if (!sp || sp & 0x7)
			break;
		sf = (void __user *)sp;
		first = false;
	}
	pagefault_enable();
}
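
Compared with the v6.13.7 code above, this older walker follows only the standard back chain and rejects return addresses by their low bit; the vdso special case, the ip_invalid() range checks, and the shared perf path came later. In both versions, the usual entry point for kernel code is the generic stack_trace_save_user() helper, which ends up in arch_stack_walk_user(). A minimal usage sketch follows, assuming a kernel built with CONFIG_USER_STACKTRACE_SUPPORT (which s390 selects); dump_user_stack() is a made-up name for illustration.

#include <linux/stacktrace.h>

/* Illustrative only: capture and print the current task's user backtrace. */
static void dump_user_stack(void)
{
	unsigned long entries[16];
	unsigned int nr;

	/* Walks the current task's user stack via arch_stack_walk_user(). */
	nr = stack_trace_save_user(entries, ARRAY_SIZE(entries));
	stack_trace_print(entries, nr, 0);
}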