// SPDX-License-Identifier: GPL-2.0
/*
 * ARM callchain support
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the ARM OProfile backtrace code.
 */
#include <linux/perf_event.h>
#include <linux/uaccess.h>

#include <asm/stacktrace.h>

/*
 * The registers we're interested in are at the end of the variable
 * length saved register structure. The fp points at the end of this
 * structure so the address of this struct is:
 * (struct frame_tail *)(xxx->fp)-1
 *
 * This code has been adapted from the ARM OProfile support.
 */
struct frame_tail {
	struct frame_tail __user *fp;
	unsigned long sp;
	unsigned long lr;
} __attribute__((packed));
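
/*
 * The structure is packed so that it mirrors the in-memory layout of the
 * saved words exactly, with no compiler-inserted padding.
 */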

/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static struct frame_tail __user *
user_backtrace(struct frame_tail __user *tail,
	       struct perf_callchain_entry_ctx *entry)
{
	struct frame_tail buftail;
	unsigned long err;

	if (!access_ok(tail, sizeof(buftail)))
		return NULL;

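	/*
	 * Perf callchains may be collected from interrupt/NMI context, so
	 * disable page faults and use the in-atomic copy: a fault on the
	 * user stack must fail the read rather than sleep.
	 */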
	pagefault_disable();
	err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
	pagefault_enable();

	if (err)
		return NULL;

	perf_callchain_store(entry, buftail.lr);

	/*
	 * Frame pointers should strictly progress back up the stack
	 * (towards higher addresses).
	 */
	if (tail + 1 >= buftail.fp)
		return NULL;

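	/* As above, the new fp points at the end of its frame's saved registers. */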
	return buftail.fp - 1;
}

void
perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
	struct frame_tail __user *tail;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* We don't support guest OS callchains yet. */
		return;
	}

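	/* The interrupted PC is the first entry in the chain. */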
	perf_callchain_store(entry, regs->ARM_pc);

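	/* Kernel threads have no user stack to unwind. */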
	if (!current->mm)
		return;

	tail = (struct frame_tail __user *)regs->ARM_fp - 1;

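	/*
	 * Walk the chain of user frames until the entry limit is reached,
	 * the chain ends, or a misaligned frame pointer is seen.
	 */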
	while ((entry->nr < entry->max_stack) &&
	       tail && !((unsigned long)tail & 0x3))
		tail = user_backtrace(tail, entry);
}

/*
 * Gets called by walk_stackframe() for every stackframe. This will be called
 * whilst unwinding the stackframe and is like a subroutine return, so we use
 * the PC.
 */
static int
callchain_trace(struct stackframe *fr,
		void *data)
{
	struct perf_callchain_entry_ctx *entry = data;
	perf_callchain_store(entry, fr->pc);
	return 0;
}

void
perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
	struct stackframe fr;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		/* We don't support guest OS callchains yet. */
		return;
	}

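	/* Seed the unwind state from the trapped registers, then walk the frames. */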
	arm_get_current_stackframe(regs, &fr);
	walk_stackframe(&fr, callchain_trace, entry);
}

unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
		return perf_guest_cbs->get_guest_ip();

	return instruction_pointer(regs);
}

unsigned long perf_misc_flags(struct pt_regs *regs)
{
	int misc = 0;

	if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
		if (perf_guest_cbs->is_user_mode())
			misc |= PERF_RECORD_MISC_GUEST_USER;
		else
			misc |= PERF_RECORD_MISC_GUEST_KERNEL;
	} else {
		if (user_mode(regs))
			misc |= PERF_RECORD_MISC_USER;
		else
			misc |= PERF_RECORD_MISC_KERNEL;
	}

	return misc;
}
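
/*
 * These functions are not called from within this file: the generic perf
 * core invokes perf_callchain_user()/perf_callchain_kernel() when a sample
 * requests a callchain, and uses perf_instruction_pointer() and
 * perf_misc_flags() when filling in the sample header.
 */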