v6.8 (arch/powerpc/perf/callchain_64.c)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Performance counter callchain support - powerpc architecture code
 *
 * Copyright © 2009 Paul Mackerras, IBM Corporation.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <asm/ptrace.h>
#include <asm/sigcontext.h>
#include <asm/ucontext.h>
#include <asm/vdso.h>
#include <asm/pte-walk.h>

#include "callchain.h"

static int read_user_stack_64(const unsigned long __user *ptr, unsigned long *ret)
{
	return __read_user_stack(ptr, ret, sizeof(*ret));
}

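/*
 * __read_user_stack() and invalid_user_sp() are not defined in this file;
 * they come from callchain.h.  As a rough sketch (an approximation of the
 * v6.8 helper, not the verbatim header), __read_user_stack() rejects
 * out-of-range or misaligned user addresses and then does a non-faulting
 * copy:
 */
static inline int __read_user_stack(const void __user *ptr, void *ret,
				    size_t size)
{
	unsigned long addr = (unsigned long)ptr;

	/* Refuse reads past the user address space or unaligned accesses. */
	if (addr > TASK_SIZE - size || (addr & (size - 1)))
		return -EFAULT;

	/* Non-faulting copy: safe to call from the PMU interrupt path. */
	return copy_from_user_nofault(ret, ptr, size);
}
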
/*
 * 64-bit user processes use the same stack frame for RT and non-RT signals.
 */
struct signal_frame_64 {
	char		dummy[__SIGNAL_FRAMESIZE];
	struct ucontext	uc;
	unsigned long	unused[2];
	unsigned int	tramp[6];
	struct siginfo	*pinfo;
	void		*puc;
	struct siginfo	info;
	char		abigap[288];
};

static int is_sigreturn_64_address(unsigned long nip, unsigned long fp)
{
	if (nip == fp + offsetof(struct signal_frame_64, tramp))
		return 1;
	if (current->mm->context.vdso &&
	    nip == VDSO64_SYMBOL(current->mm->context.vdso, sigtramp_rt64))
		return 1;
	return 0;
}

/*
 * Do some sanity checking on the signal frame pointed to by sp.
 * We check the pinfo and puc pointers in the frame.
 */
static int sane_signal_64_frame(unsigned long sp)
{
	struct signal_frame_64 __user *sf;
	unsigned long pinfo, puc;

	sf = (struct signal_frame_64 __user *) sp;
	if (read_user_stack_64((unsigned long __user *) &sf->pinfo, &pinfo) ||
	    read_user_stack_64((unsigned long __user *) &sf->puc, &puc))
		return 0;
	return pinfo == (unsigned long) &sf->info &&
		puc == (unsigned long) &sf->uc;
}

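/*
 * The walker below relies on the 64-bit ELF ABI stack frame layout: each
 * frame starts with a back-chain pointer to the caller's frame at offset 0,
 * and the caller's saved LR lives at offset 16 (i.e. fp[2] with 8-byte
 * longs), which is why next_sp is read from fp[0] and next_ip from fp[2].
 */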
void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
			    struct pt_regs *regs)
{
	unsigned long sp, next_sp;
	unsigned long next_ip;
	unsigned long lr;
	long level = 0;
	struct signal_frame_64 __user *sigframe;
	unsigned long __user *fp, *uregs;

	next_ip = perf_instruction_pointer(regs);
	lr = regs->link;
	sp = regs->gpr[1];
	perf_callchain_store(entry, next_ip);

	while (entry->nr < entry->max_stack) {
		fp = (unsigned long __user *) sp;
		if (invalid_user_sp(sp) || read_user_stack_64(fp, &next_sp))
			return;
		if (level > 0 && read_user_stack_64(&fp[2], &next_ip))
			return;

		/*
		 * Note: the next_sp - sp >= signal frame size check
		 * is true when next_sp < sp, which can happen when
		 * transitioning from an alternate signal stack to the
		 * normal stack.
		 */
		if (next_sp - sp >= sizeof(struct signal_frame_64) &&
		    (is_sigreturn_64_address(next_ip, sp) ||
		     (level <= 1 && is_sigreturn_64_address(lr, sp))) &&
		    sane_signal_64_frame(sp)) {
			/*
			 * This looks like a signal frame
			 */
			sigframe = (struct signal_frame_64 __user *) sp;
			uregs = sigframe->uc.uc_mcontext.gp_regs;
			if (read_user_stack_64(&uregs[PT_NIP], &next_ip) ||
			    read_user_stack_64(&uregs[PT_LNK], &lr) ||
			    read_user_stack_64(&uregs[PT_R1], &sp))
				return;
			level = 0;
			perf_callchain_store_context(entry, PERF_CONTEXT_USER);
			perf_callchain_store(entry, next_ip);
			continue;
		}

		if (level == 0)
			next_ip = lr;
		perf_callchain_store(entry, next_ip);
		++level;
		sp = next_sp;
	}
}
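/*
 * For context, perf_callchain_user_64() is not called directly by the perf
 * core; arch/powerpc/perf/callchain.c provides the perf_callchain_user()
 * entry point, which dispatches on the task's ABI.  Roughly (a sketch, not
 * the verbatim source):
 */
void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
			 struct pt_regs *regs)
{
	/* 64-bit tasks take the walker in this file, 32-bit tasks the compat one. */
	if (!is_32bit_task())
		perf_callchain_user_64(entry, regs);
	else
		perf_callchain_user_32(entry, regs);
}
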
v5.9 (arch/powerpc/perf/callchain_64.c)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Performance counter callchain support - powerpc architecture code
 *
 * Copyright © 2009 Paul Mackerras, IBM Corporation.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <asm/ptrace.h>
#include <asm/sigcontext.h>
#include <asm/ucontext.h>
#include <asm/vdso.h>
#include <asm/pte-walk.h>

#include "callchain.h"

/*
 * On 64-bit we don't want to invoke hash_page on user addresses from
 * interrupt context, so if the access faults, we read the page tables
 * to find which page (if any) is mapped and access it directly.
 */
int read_user_stack_slow(const void __user *ptr, void *buf, int nb)
{
	unsigned long addr = (unsigned long) ptr;
	unsigned long offset;
	struct page *page;
	void *kaddr;

	if (get_user_page_fast_only(addr, FOLL_WRITE, &page)) {
		kaddr = page_address(page);

		/* offset of the address within its page */
		offset = addr & ~PAGE_MASK;

		memcpy(buf, kaddr + offset, nb);
		put_page(page);
		return 0;
	}
	return -EFAULT;
}

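/*
 * In this version, __read_user_stack() in callchain.h first attempts a
 * non-faulting copy and, on 64-bit, falls back to read_user_stack_slow()
 * above when that copy faults.  As a rough sketch (an approximation, not
 * the verbatim header):
 */
static inline int __read_user_stack(const void __user *ptr, void *ret,
				    size_t size)
{
	unsigned long addr = (unsigned long)ptr;
	int rc;

	/* Refuse reads past the user address space or unaligned accesses. */
	if (addr > TASK_SIZE - size || (addr & (size - 1)))
		return -EFAULT;

	rc = copy_from_user_nofault(ret, ptr, size);

	/* Fall back to walking the page tables if the direct copy faulted. */
	if (IS_ENABLED(CONFIG_PPC64) && rc)
		return read_user_stack_slow(ptr, ret, size);

	return rc;
}
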
static int read_user_stack_64(const unsigned long __user *ptr, unsigned long *ret)
{
	return __read_user_stack(ptr, ret, sizeof(*ret));
}

/*
 * 64-bit user processes use the same stack frame for RT and non-RT signals.
 */
struct signal_frame_64 {
	char		dummy[__SIGNAL_FRAMESIZE];
	struct ucontext	uc;
	unsigned long	unused[2];
	unsigned int	tramp[6];
	struct siginfo	*pinfo;
	void		*puc;
	struct siginfo	info;
	char		abigap[288];
};

static int is_sigreturn_64_address(unsigned long nip, unsigned long fp)
{
	if (nip == fp + offsetof(struct signal_frame_64, tramp))
		return 1;
	if (vdso64_rt_sigtramp && current->mm->context.vdso_base &&
	    nip == current->mm->context.vdso_base + vdso64_rt_sigtramp)
		return 1;
	return 0;
}

/*
 * Do some sanity checking on the signal frame pointed to by sp.
 * We check the pinfo and puc pointers in the frame.
 */
static int sane_signal_64_frame(unsigned long sp)
{
	struct signal_frame_64 __user *sf;
	unsigned long pinfo, puc;

	sf = (struct signal_frame_64 __user *) sp;
	if (read_user_stack_64((unsigned long __user *) &sf->pinfo, &pinfo) ||
	    read_user_stack_64((unsigned long __user *) &sf->puc, &puc))
		return 0;
	return pinfo == (unsigned long) &sf->info &&
		puc == (unsigned long) &sf->uc;
}

void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
			    struct pt_regs *regs)
{
	unsigned long sp, next_sp;
	unsigned long next_ip;
	unsigned long lr;
	long level = 0;
	struct signal_frame_64 __user *sigframe;
	unsigned long __user *fp, *uregs;

	next_ip = perf_instruction_pointer(regs);
	lr = regs->link;
	sp = regs->gpr[1];
	perf_callchain_store(entry, next_ip);

	while (entry->nr < entry->max_stack) {
		fp = (unsigned long __user *) sp;
		if (invalid_user_sp(sp) || read_user_stack_64(fp, &next_sp))
			return;
		if (level > 0 && read_user_stack_64(&fp[2], &next_ip))
			return;

		/*
		 * Note: the next_sp - sp >= signal frame size check
		 * is true when next_sp < sp, which can happen when
		 * transitioning from an alternate signal stack to the
		 * normal stack.
		 */
		if (next_sp - sp >= sizeof(struct signal_frame_64) &&
		    (is_sigreturn_64_address(next_ip, sp) ||
		     (level <= 1 && is_sigreturn_64_address(lr, sp))) &&
		    sane_signal_64_frame(sp)) {
			/*
			 * This looks like a signal frame
			 */
			sigframe = (struct signal_frame_64 __user *) sp;
			uregs = sigframe->uc.uc_mcontext.gp_regs;
			if (read_user_stack_64(&uregs[PT_NIP], &next_ip) ||
			    read_user_stack_64(&uregs[PT_LNK], &lr) ||
			    read_user_stack_64(&uregs[PT_R1], &sp))
				return;
			level = 0;
			perf_callchain_store_context(entry, PERF_CONTEXT_USER);
			perf_callchain_store(entry, next_ip);
			continue;
		}

		if (level == 0)
			next_ip = lr;
		perf_callchain_store(entry, next_ip);
		++level;
		sp = next_sp;
	}
}