/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#ifndef _ASM_PROCESSOR_H
#define _ASM_PROCESSOR_H

#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/sizes.h>

#include <asm/cpu.h>
#include <asm/cpu-info.h>
#include <asm/hw_breakpoint.h>
#include <asm/loongarch.h>
#include <asm/vdso/processor.h>
#include <uapi/asm/ptrace.h>
#include <uapi/asm/sigcontext.h>

#ifdef CONFIG_32BIT

#define TASK_SIZE	0x80000000UL
#define TASK_SIZE_MIN	TASK_SIZE
#define STACK_TOP_MAX	TASK_SIZE

#define TASK_IS_32BIT_ADDR 1

#endif

#ifdef CONFIG_64BIT

#define TASK_SIZE32	0x100000000UL
#define TASK_SIZE64	(0x1UL << ((cpu_vabits > VA_BITS) ? VA_BITS : cpu_vabits))
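/*
 * TASK_SIZE64 clamps the user address-space limit to the smaller of the
 * page-table reach (VA_BITS) and the hardware virtual-address width
 * (cpu_vabits). As a worked illustration (not from this file): assuming
 * the default 16 KiB pages with 3-level page tables, VA_BITS is 47, so
 * on hardware with cpu_vabits == 48 this yields TASK_SIZE64 = 1UL << 47,
 * i.e. a 128 TiB user address space.
 */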

#define TASK_SIZE	(test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)
#define TASK_SIZE_MIN	TASK_SIZE32
#define STACK_TOP_MAX	TASK_SIZE64

#define TASK_SIZE_OF(tsk) \
	(test_tsk_thread_flag(tsk, TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)

#define TASK_IS_32BIT_ADDR test_thread_flag(TIF_32BIT_ADDR)

#endif

#define VDSO_RANDOMIZE_SIZE	(TASK_IS_32BIT_ADDR ? SZ_1M : SZ_64M)

unsigned long stack_top(void);
#define STACK_TOP stack_top()

/*
 * This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3)
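/*
 * For example, for a 32-bit address space TASK_SIZE is 4 GiB, so the
 * search starts at PAGE_ALIGN(0x100000000 / 3), i.e. roughly the
 * 1.33 GiB mark rounded up to the page size.
 */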

#define FPU_REG_WIDTH	256
#define FPU_ALIGN	__attribute__((aligned(32)))

union fpureg {
	__u32	val32[FPU_REG_WIDTH / 32];
	__u64	val64[FPU_REG_WIDTH / 64];
};

#define FPR_IDX(width, idx)	(idx)

#define BUILD_FPR_ACCESS(width) \
static inline u##width get_fpr##width(union fpureg *fpr, unsigned int idx) \
{ \
	return fpr->val##width[FPR_IDX(width, idx)]; \
} \
 \
static inline void set_fpr##width(union fpureg *fpr, unsigned int idx, \
				  u##width val) \
{ \
	fpr->val##width[FPR_IDX(width, idx)] = val; \
}

BUILD_FPR_ACCESS(32)
BUILD_FPR_ACCESS(64)
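/*
 * The generated accessors read or write one 32- or 64-bit lane of an FPU
 * register image. A hypothetical usage sketch (not part of this file;
 * fpu and n are illustrative names):
 *
 *	u64 lo = get_fpr64(&fpu->fpr[n], 0);	// low 64 bits of FPR n
 *	set_fpr64(&fpu->fpr[n], 0, lo ^ 1);	// flip bit 0, store back
 */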

struct loongarch_fpu {
	uint64_t	fcc;	/* 8x8: eight condition flags, one per byte */
	uint32_t	fcsr;
	uint32_t	ftop;	/* x87-style stack-top index, used by LBT */
	union fpureg	fpr[NUM_FPU_REGS];
};

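/*
 * Per-thread state of the Loongson Binary Translation (LBT) extension:
 * four scratch registers plus an x86-style eflags image, saved and
 * restored across context switches when LBT is in use.
 */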
struct loongarch_lbt {
	/* Scratch registers */
	unsigned long scr0;
	unsigned long scr1;
	unsigned long scr2;
	unsigned long scr3;
	/* Eflags register */
	unsigned long eflags;
};

#define INIT_CPUMASK { \
	{0,} \
}

#define ARCH_MIN_TASKALIGN	32

struct loongarch_vdso_info;

/*
 * If you change thread_struct remember to change the #defines below too!
 */
struct thread_struct {
	/* Main processor registers. */
	unsigned long reg01, reg03, reg22; /* ra sp fp */
	unsigned long reg23, reg24, reg25, reg26; /* s0-s3 */
	unsigned long reg27, reg28, reg29, reg30, reg31; /* s4-s8 */

	/* __schedule() return address / call frame address */
	unsigned long sched_ra;
	unsigned long sched_cfa;

	/* CSR registers */
	unsigned long csr_prmd;
	unsigned long csr_crmd;
	unsigned long csr_euen;
	unsigned long csr_ecfg;
	unsigned long csr_badvaddr;	/* Last user fault */

	/* Other stuff associated with the thread. */
	unsigned long trap_nr;
	unsigned long error_code;
	unsigned long single_step; /* Used by PTRACE_SINGLESTEP */
	struct loongarch_vdso_info *vdso;

	/*
	 * FPU & vector registers: these must come last in the inherited
	 * context because they are conditionally copied at fork().
	 */
	struct loongarch_fpu fpu FPU_ALIGN;
	struct loongarch_lbt lbt; /* Also conditionally copied */

	/* Hardware breakpoints pinned to this task. */
	struct perf_event *hbp_break[LOONGARCH_MAX_BRP];
	struct perf_event *hbp_watch[LOONGARCH_MAX_WRP];
};

#define thread_saved_ra(tsk)	(tsk->thread.sched_ra)
#define thread_saved_fp(tsk)	(tsk->thread.sched_cfa)

#define INIT_THREAD { \
	/* \
	 * Main processor registers \
	 */ \
	.reg01 = 0, \
	.reg03 = 0, \
	.reg22 = 0, \
	.reg23 = 0, \
	.reg24 = 0, \
	.reg25 = 0, \
	.reg26 = 0, \
	.reg27 = 0, \
	.reg28 = 0, \
	.reg29 = 0, \
	.reg30 = 0, \
	.reg31 = 0, \
	.sched_ra = 0, \
	.sched_cfa = 0, \
	.csr_crmd = 0, \
	.csr_prmd = 0, \
	.csr_euen = 0, \
	.csr_ecfg = 0, \
	.csr_badvaddr = 0, \
	/* \
	 * Other stuff associated with the process \
	 */ \
	.trap_nr = 0, \
	.error_code = 0, \
	/* \
	 * FPU & vector registers \
	 */ \
	.fpu = { \
		.fcc = 0, \
		.fcsr = 0, \
		.ftop = 0, \
		.fpr = {{{0,},},}, \
	}, \
	.hbp_break = {0}, \
	.hbp_watch = {0}, \
}

struct task_struct;

enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_HALT, IDLE_NOMWAIT, IDLE_POLL};

extern unsigned long boot_option_idle_override;
/*
 * Do necessary setup to start up a newly executed thread.
 */
extern void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp);

unsigned long __get_wchan(struct task_struct *p);

#define __KSTK_TOS(tsk) ((unsigned long)task_stack_page(tsk) + \
			 THREAD_SIZE - sizeof(struct pt_regs))
#define task_pt_regs(tsk) ((struct pt_regs *)__KSTK_TOS(tsk))
#define KSTK_EIP(tsk) (task_pt_regs(tsk)->csr_era)
#define KSTK_ESP(tsk) (task_pt_regs(tsk)->regs[3])
#define KSTK_EUEN(tsk) (task_pt_regs(tsk)->csr_euen)
#define KSTK_ECFG(tsk) (task_pt_regs(tsk)->csr_ecfg)
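/*
 * task_pt_regs() locates the user register frame saved at the top of a
 * task's kernel stack; the KSTK_* helpers pick single fields out of it.
 * A hypothetical usage sketch (not part of this file; tsk is an
 * illustrative name):
 *
 *	struct pt_regs *regs = task_pt_regs(tsk);
 *	unsigned long user_pc = KSTK_EIP(tsk);	// == regs->csr_era
 *	unsigned long user_sp = KSTK_ESP(tsk);	// == regs->regs[3]
 */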

#define return_address() ({__asm__ __volatile__("":::"$1"); __builtin_return_address(0);})
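/*
 * The empty asm above clobbers $1 (the return-address register $ra),
 * forcing the compiler to preserve the return address so that
 * __builtin_return_address(0) stays reliable even in leaf functions.
 */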

#ifdef CONFIG_CPU_HAS_PREFETCH

#define ARCH_HAS_PREFETCH
#define prefetch(x) __builtin_prefetch((x), 0, 1)

#define ARCH_HAS_PREFETCHW
#define prefetchw(x) __builtin_prefetch((x), 1, 1)
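/*
 * __builtin_prefetch(addr, rw, locality): rw is 0 for a read hint and 1
 * for a write hint; locality 1 requests low temporal locality, so both
 * macros fetch the line while keeping cache pollution modest.
 */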

#endif

#endif /* _ASM_PROCESSOR_H */