/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm/include/asm/ptrace.h
 *
 * Copyright (C) 1996-2003 Russell King
 */
#ifndef __ASM_ARM_PTRACE_H
#define __ASM_ARM_PTRACE_H

#include <uapi/asm/ptrace.h>

#ifndef __ASSEMBLY__
#include <linux/bitfield.h>
#include <linux/types.h>

struct pt_regs {
        unsigned long uregs[18];
};

struct svc_pt_regs {
        struct pt_regs regs;
        u32 dacr;
        u32 ttbcr;
};

#define to_svc_pt_regs(r) container_of(r, struct svc_pt_regs, regs)
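
/*
 * Illustrative sketch (not part of the original header): to_svc_pt_regs()
 * walks back from the embedded pt_regs to the enclosing svc_pt_regs, so
 * the extra SVC-mode state is reachable from a plain pt_regs pointer.
 * A hypothetical helper, assuming @regs points at a kernel-mode (SVC)
 * exception frame:
 *
 *      static u32 example_read_dacr(struct pt_regs *regs)
 *      {
 *              struct svc_pt_regs *svc = to_svc_pt_regs(regs);
 *
 *              return svc->dacr;       // saved Domain Access Control Register
 *      }
 */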

#define user_mode(regs) \
        (((regs)->ARM_cpsr & 0xf) == 0)

#ifdef CONFIG_ARM_THUMB
#define thumb_mode(regs) \
        (((regs)->ARM_cpsr & PSR_T_BIT))
#else
#define thumb_mode(regs) (0)
#endif

#ifndef CONFIG_CPU_V7M
#define isa_mode(regs) \
        (FIELD_GET(PSR_J_BIT, (regs)->ARM_cpsr) << 1 | \
         FIELD_GET(PSR_T_BIT, (regs)->ARM_cpsr))
#else
#define isa_mode(regs) 1 /* Thumb */
#endif
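
/*
 * Illustrative note (not in the original header): isa_mode() packs the
 * CPSR J and T bits into a two-bit value, so under the usual ARM
 * encoding 0 = ARM, 1 = Thumb, 2 = Jazelle, 3 = ThumbEE. A hedged
 * usage sketch:
 *
 *      if (isa_mode(regs) == 1)
 *              ;       // the trapped instruction was executing in Thumb state
 */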

#define processor_mode(regs) \
        ((regs)->ARM_cpsr & MODE_MASK)

#define interrupts_enabled(regs) \
        (!((regs)->ARM_cpsr & PSR_I_BIT))

#define fast_interrupts_enabled(regs) \
        (!((regs)->ARM_cpsr & PSR_F_BIT))

/* Are the current registers suitable for user mode?
 * (used to maintain security in signal handlers)
 */
static inline int valid_user_regs(struct pt_regs *regs)
{
#ifndef CONFIG_CPU_V7M
        unsigned long mode = regs->ARM_cpsr & MODE_MASK;

        /*
         * Always clear the F (FIQ) and A (delayed abort) bits
         */
        regs->ARM_cpsr &= ~(PSR_F_BIT | PSR_A_BIT);

        if ((regs->ARM_cpsr & PSR_I_BIT) == 0) {
                if (mode == USR_MODE)
                        return 1;
                if (elf_hwcap & HWCAP_26BIT && mode == USR26_MODE)
                        return 1;
        }

        /*
         * Force CPSR to something logical...
         */
        regs->ARM_cpsr &= PSR_f | PSR_s | PSR_x | PSR_T_BIT | MODE32_BIT;
        if (!(elf_hwcap & HWCAP_26BIT))
                regs->ARM_cpsr |= USR_MODE;

        return 0;
#else /* ifndef CONFIG_CPU_V7M */
        return 1;
#endif
}

static inline long regs_return_value(struct pt_regs *regs)
{
        return regs->ARM_r0;
}

#define instruction_pointer(regs) (regs)->ARM_pc

#ifdef CONFIG_THUMB2_KERNEL
#define frame_pointer(regs) (regs)->ARM_r7
#else
#define frame_pointer(regs) (regs)->ARM_fp
#endif

static inline void instruction_pointer_set(struct pt_regs *regs,
                                           unsigned long val)
{
        instruction_pointer(regs) = val;
}

#ifdef CONFIG_SMP
extern unsigned long profile_pc(struct pt_regs *regs);
#else
#define profile_pc(regs) instruction_pointer(regs)
#endif

#define predicate(x) ((x) & 0xf0000000)
#define PREDICATE_ALWAYS 0xe0000000

/*
 * True if instr is a 32-bit Thumb instruction. This works if instr
 * is the first or only halfword of a Thumb instruction. It also works
 * when instr holds all 32 bits of a wide Thumb instruction, provided
 * it is stored in the form (first_half << 16) | second_half.
 */
#define is_wide_instruction(instr) ((unsigned)(instr) >= 0xe800)
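
/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * decoder loop showing how the first halfword decides whether a second
 * halfword must be fetched and combined as described above. The pointer
 * name insn is an assumption for this example only.
 *
 *      u16 first = *insn++;
 *      u32 instr = first;
 *
 *      if (is_wide_instruction(first))
 *              instr = (instr << 16) | *insn++;        // 32-bit Thumb encoding
 */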

/*
 * kprobe-based event tracer support
 */
#include <linux/compiler.h>
#define MAX_REG_OFFSET (offsetof(struct pt_regs, ARM_ORIG_r0))

extern int regs_query_register_offset(const char *name);
extern const char *regs_query_register_name(unsigned int offset);
extern bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr);
extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
                                               unsigned int n);
/**
 * regs_get_register() - get register value from its offset
 * @regs: pt_regs from which the register value is taken
 * @offset: byte offset of the register within struct pt_regs
 *
 * regs_get_register() returns the value of the register stored at
 * @offset bytes from the start of @regs. If @offset is bigger than
 * MAX_REG_OFFSET, this returns 0.
 */
static inline unsigned long regs_get_register(struct pt_regs *regs,
                                              unsigned int offset)
{
        if (unlikely(offset > MAX_REG_OFFSET))
                return 0;
        return *(unsigned long *)((unsigned long)regs + offset);
}
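
/*
 * Illustrative sketch (not part of the original header): a hypothetical
 * caller fetching r0 by its byte offset inside struct pt_regs, the same
 * offset convention the kprobe event tracer helpers above work with.
 *
 *      unsigned long example_first_arg(struct pt_regs *regs)
 *      {
 *              return regs_get_register(regs, offsetof(struct pt_regs, ARM_r0));
 *      }
 */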

/* Valid only for Kernel mode traps. */
static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
{
        return regs->ARM_sp;
}

static inline unsigned long user_stack_pointer(struct pt_regs *regs)
{
        return regs->ARM_sp;
}

#define current_pt_regs(void) ({ (struct pt_regs *) \
                ((current_stack_pointer | (THREAD_SIZE - 1)) - 7) - 1; \
})
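
/*
 * Illustrative note (not in the original header): current_pt_regs()
 * ORs the stack pointer with THREAD_SIZE - 1 to find the last byte of
 * the current THREAD_SIZE-aligned kernel stack, steps back over the
 * 8 bytes kept free at the very top, and then backs off by one
 * struct pt_regs, landing on the exception frame saved on kernel
 * entry for the current task.
 */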

static inline void regs_set_return_value(struct pt_regs *regs, unsigned long rc)
{
        regs->ARM_r0 = rc;
}

/*
 * Update ITSTATE after normal execution of an IT block instruction.
 *
 * The 8 IT state bits are split into two parts in CPSR:
 *      ITSTATE<1:0> are in CPSR<26:25>
 *      ITSTATE<7:2> are in CPSR<15:10>
 */
static inline unsigned long it_advance(unsigned long cpsr)
{
        if ((cpsr & 0x06000400) == 0) {
                /* ITSTATE<2:0> == 0 means end of IT block, so clear IT state */
                cpsr &= ~PSR_IT_MASK;
        } else {
                /* We need to shift left ITSTATE<4:0> */
                const unsigned long mask = 0x06001c00; /* Mask ITSTATE<4:0> */
                unsigned long it = cpsr & mask;
                it <<= 1;
                it |= it >> (27 - 10); /* Carry ITSTATE<2> to correct place */
                it &= mask;
                cpsr &= ~mask;
                cpsr |= it;
        }
        return cpsr;
}
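
/*
 * Illustrative note (not part of the original header): advancing the
 * state shifts ITSTATE<4:0> left by one so the next condition field
 * moves into place; once ITSTATE<2:0> reaches zero the whole IT state
 * is cleared and later instructions execute unconditionally. A hedged
 * usage sketch for an emulation/single-step path:
 *
 *      regs->ARM_cpsr = it_advance(regs->ARM_cpsr);    // consume one IT slot
 */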

int syscall_trace_enter(struct pt_regs *regs);
void syscall_trace_exit(struct pt_regs *regs);

#endif /* __ASSEMBLY__ */
#endif