// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
 * Chen Liqin <liqin.chen@sunplusct.com>
 * Lennox Wu <lennox.wu@sunplusct.com>
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 */

#include <linux/bitfield.h>
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/tick.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <linux/personality.h>

#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/csr.h>
#include <asm/stacktrace.h>
#include <asm/string.h>
#include <asm/switch_to.h>
#include <asm/thread_info.h>
#include <asm/cpuidle.h>
#include <asm/vector.h>
#include <asm/cpufeature.h>
#include <asm/exec.h>

#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

extern asmlinkage void ret_from_fork(void);

void noinstr arch_cpu_idle(void)
{
	cpu_do_idle();
}

int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
	if (!unaligned_ctl_available())
		return -EINVAL;

	tsk->thread.align_ctl = val;
	return 0;
}

int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
	if (!unaligned_ctl_available())
		return -EINVAL;

	return put_user(tsk->thread.align_ctl, (unsigned long __user *)adr);
}
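
/*
 * These two helpers back the generic PR_SET_UNALIGN/PR_GET_UNALIGN prctls
 * (wired up through the SET_UNALIGN_CTL()/GET_UNALIGN_CTL() hooks). A
 * rough userspace sketch:
 *
 *	prctl(PR_SET_UNALIGN, PR_UNALIGN_NOPRINT);
 *	prctl(PR_GET_UNALIGN, (unsigned long)&ctl);
 */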

void __show_regs(struct pt_regs *regs)
{
	show_regs_print_info(KERN_DEFAULT);

	if (!user_mode(regs)) {
		pr_cont("epc : %pS\n", (void *)regs->epc);
		pr_cont(" ra : %pS\n", (void *)regs->ra);
	}

	pr_cont("epc : " REG_FMT " ra : " REG_FMT " sp : " REG_FMT "\n",
		regs->epc, regs->ra, regs->sp);
	pr_cont(" gp : " REG_FMT " tp : " REG_FMT " t0 : " REG_FMT "\n",
		regs->gp, regs->tp, regs->t0);
	pr_cont(" t1 : " REG_FMT " t2 : " REG_FMT " s0 : " REG_FMT "\n",
		regs->t1, regs->t2, regs->s0);
	pr_cont(" s1 : " REG_FMT " a0 : " REG_FMT " a1 : " REG_FMT "\n",
		regs->s1, regs->a0, regs->a1);
	pr_cont(" a2 : " REG_FMT " a3 : " REG_FMT " a4 : " REG_FMT "\n",
		regs->a2, regs->a3, regs->a4);
	pr_cont(" a5 : " REG_FMT " a6 : " REG_FMT " a7 : " REG_FMT "\n",
		regs->a5, regs->a6, regs->a7);
	pr_cont(" s2 : " REG_FMT " s3 : " REG_FMT " s4 : " REG_FMT "\n",
		regs->s2, regs->s3, regs->s4);
	pr_cont(" s5 : " REG_FMT " s6 : " REG_FMT " s7 : " REG_FMT "\n",
		regs->s5, regs->s6, regs->s7);
	pr_cont(" s8 : " REG_FMT " s9 : " REG_FMT " s10: " REG_FMT "\n",
		regs->s8, regs->s9, regs->s10);
	pr_cont(" s11: " REG_FMT " t3 : " REG_FMT " t4 : " REG_FMT "\n",
		regs->s11, regs->t3, regs->t4);
	pr_cont(" t5 : " REG_FMT " t6 : " REG_FMT "\n",
		regs->t5, regs->t6);

	pr_cont("status: " REG_FMT " badaddr: " REG_FMT " cause: " REG_FMT "\n",
		regs->status, regs->badaddr, regs->cause);
}
void show_regs(struct pt_regs *regs)
{
	__show_regs(regs);
	if (!user_mode(regs))
		dump_backtrace(regs, NULL, KERN_DEFAULT);
}

unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_u32_below(PAGE_SIZE);
	return sp & ~0xf;
}
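
/*
 * Worked example for the randomization above: with 4 KiB pages the stack
 * pointer drops by up to 4095 bytes of random offset, then the final mask
 * rounds it down to the 16-byte alignment the RISC-V psABI requires.
 */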

#ifdef CONFIG_COMPAT
static bool compat_mode_supported __read_mostly;

bool compat_elf_check_arch(Elf32_Ehdr *hdr)
{
	return compat_mode_supported &&
	       hdr->e_machine == EM_RISCV &&
	       hdr->e_ident[EI_CLASS] == ELFCLASS32;
}

static int __init compat_mode_detect(void)
{
	unsigned long tmp = csr_read(CSR_STATUS);

	csr_write(CSR_STATUS, (tmp & ~SR_UXL) | SR_UXL_32);
	compat_mode_supported =
			(csr_read(CSR_STATUS) & SR_UXL) == SR_UXL_32;

	csr_write(CSR_STATUS, tmp);

	pr_info("riscv: ELF compat mode %s\n",
		compat_mode_supported ? "supported" : "unsupported");

	return 0;
}
early_initcall(compat_mode_detect);
#endif
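
/*
 * Note on the probe above: status.UXL is a WARL field, so writing
 * SR_UXL_32 and reading the register back is enough to learn whether the
 * hart implements RV32 user mode; the saved value is then restored.
 */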

void start_thread(struct pt_regs *regs, unsigned long pc,
		  unsigned long sp)
{
	regs->status = SR_PIE;
	if (has_fpu()) {
		regs->status |= SR_FS_INITIAL;
		/*
		 * Restore the initial value to the FP register
		 * before starting the user program.
		 */
		fstate_restore(current, regs);
	}
	regs->epc = pc;
	regs->sp = sp;

#ifdef CONFIG_64BIT
	regs->status &= ~SR_UXL;

	if (is_compat_task())
		regs->status |= SR_UXL_32;
	else
		regs->status |= SR_UXL_64;
#endif
}
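
/*
 * start_thread() is reached from the binfmt loader at execve() time; on
 * 64-bit kernels it also selects the user XLEN (status.UXL), so compat
 * (RV32) and native (RV64) tasks each start with the proper register width.
 */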

void flush_thread(void)
{
#ifdef CONFIG_FPU
	/*
	 * Reset FPU state and context
	 * frm: round to nearest, ties to even (IEEE default)
	 * fflags: accrued exceptions cleared
	 */
	fstate_off(current, task_pt_regs(current));
	memset(&current->thread.fstate, 0, sizeof(current->thread.fstate));
#endif
#ifdef CONFIG_RISCV_ISA_V
	/* Reset vector state */
	riscv_v_vstate_ctrl_init(current);
	riscv_v_vstate_off(task_pt_regs(current));
	kfree(current->thread.vstate.datap);
	memset(&current->thread.vstate, 0, sizeof(struct __riscv_v_ext_state));
	clear_tsk_thread_flag(current, TIF_RISCV_V_DEFER_RESTORE);
#endif
#ifdef CONFIG_RISCV_ISA_SUPM
	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SUPM))
		envcfg_update_bits(current, ENVCFG_PMM, ENVCFG_PMM_PMLEN_0);
#endif
}
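
/*
 * flush_thread() runs on execve(), so stale FP, vector, and pointer
 * masking state from the previous program image never leaks into the
 * freshly exec'd one.
 */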

void arch_release_task_struct(struct task_struct *tsk)
{
	/* Free the vector context of datap. */
	if (has_vector())
		riscv_v_thread_free(tsk);
}

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	fstate_save(src, task_pt_regs(src));
	*dst = *src;
	/* clear entire V context, including datap for a new task */
	memset(&dst->thread.vstate, 0, sizeof(struct __riscv_v_ext_state));
	memset(&dst->thread.kernel_vstate, 0, sizeof(struct __riscv_v_ext_state));
	clear_tsk_thread_flag(dst, TIF_RISCV_V_DEFER_RESTORE);

	return 0;
}

int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
	unsigned long clone_flags = args->flags;
	unsigned long usp = args->stack;
	unsigned long tls = args->tls;
	struct pt_regs *childregs = task_pt_regs(p);

	/* Ensure all threads in this mm have the same pointer masking mode. */
	if (IS_ENABLED(CONFIG_RISCV_ISA_SUPM) && p->mm && (clone_flags & CLONE_VM))
		set_bit(MM_CONTEXT_LOCK_PMLEN, &p->mm->context.flags);

	memset(&p->thread.s, 0, sizeof(p->thread.s));

	/* p->thread holds context to be restored by __switch_to() */
	if (unlikely(args->fn)) {
		/* Kernel thread */
		memset(childregs, 0, sizeof(struct pt_regs));
		/* Supervisor/Machine, irqs on: */
		childregs->status = SR_PP | SR_PIE;

		p->thread.s[0] = (unsigned long)args->fn;
		p->thread.s[1] = (unsigned long)args->fn_arg;
	} else {
		*childregs = *(current_pt_regs());
		/* Turn off status.VS */
		riscv_v_vstate_off(childregs);
		if (usp) /* User fork */
			childregs->sp = usp;
		if (clone_flags & CLONE_SETTLS)
			childregs->tp = tls;
		childregs->a0 = 0; /* Return value of fork() */
		p->thread.s[0] = 0;
	}
	p->thread.riscv_v_flags = 0;
	if (has_vector())
		riscv_v_thread_alloc(p);
	p->thread.ra = (unsigned long)ret_from_fork;
	p->thread.sp = (unsigned long)childregs; /* kernel sp */
	return 0;
}
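
/*
 * Rough control flow from here: the first __switch_to() into the child
 * loads p->thread.ra/sp, so the child resumes in ret_from_fork. A kernel
 * thread then jumps to s[0] with s[1] as its argument, while a user fork
 * returns to userspace through childregs, where a0 == 0 marks the child.
 */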

void __init arch_task_cache_init(void)
{
	riscv_v_setup_ctx_cache();
}

#ifdef CONFIG_RISCV_ISA_SUPM
enum {
	PMLEN_0 = 0,
	PMLEN_7 = 7,
	PMLEN_16 = 16,
};

static bool have_user_pmlen_7;
static bool have_user_pmlen_16;

/*
 * Control the relaxed ABI allowing tagged user addresses into the kernel.
 */
static unsigned int tagged_addr_disabled;

long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg)
{
	unsigned long valid_mask = PR_PMLEN_MASK | PR_TAGGED_ADDR_ENABLE;
	struct thread_info *ti = task_thread_info(task);
	struct mm_struct *mm = task->mm;
	unsigned long pmm;
	u8 pmlen;

	if (is_compat_thread(ti))
		return -EINVAL;

	if (arg & ~valid_mask)
		return -EINVAL;

	/*
	 * Prefer the smallest PMLEN that satisfies the user's request,
	 * in case choosing a larger PMLEN has a performance impact.
	 */
	pmlen = FIELD_GET(PR_PMLEN_MASK, arg);
	if (pmlen == PMLEN_0) {
		pmm = ENVCFG_PMM_PMLEN_0;
	} else if (pmlen <= PMLEN_7 && have_user_pmlen_7) {
		pmlen = PMLEN_7;
		pmm = ENVCFG_PMM_PMLEN_7;
	} else if (pmlen <= PMLEN_16 && have_user_pmlen_16) {
		pmlen = PMLEN_16;
		pmm = ENVCFG_PMM_PMLEN_16;
	} else {
		return -EINVAL;
	}

	/*
	 * Do not allow the enabling of the tagged address ABI if globally
	 * disabled via sysctl abi.tagged_addr_disabled, or if pointer
	 * masking is disabled for userspace.
	 */
	if (arg & PR_TAGGED_ADDR_ENABLE && (tagged_addr_disabled || !pmlen))
		return -EINVAL;

	if (!(arg & PR_TAGGED_ADDR_ENABLE))
		pmlen = PMLEN_0;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	if (test_bit(MM_CONTEXT_LOCK_PMLEN, &mm->context.flags) && mm->context.pmlen != pmlen) {
		mmap_write_unlock(mm);
		return -EBUSY;
	}

	envcfg_update_bits(task, ENVCFG_PMM, pmm);
	mm->context.pmlen = pmlen;

	mmap_write_unlock(mm);

	return 0;
}
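
/*
 * A rough userspace sketch, assuming the PR_PMLEN_SHIFT/PR_PMLEN_MASK and
 * PR_TAGGED_ADDR_ENABLE definitions from <linux/prctl.h>: request at least
 * seven tag bits and enable the tagged address ABI, then read the
 * effective setting back:
 *
 *	prctl(PR_SET_TAGGED_ADDR_CTRL,
 *	      PR_TAGGED_ADDR_ENABLE | (7UL << PR_PMLEN_SHIFT), 0, 0, 0);
 *	prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
 */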

long get_tagged_addr_ctrl(struct task_struct *task)
{
	struct thread_info *ti = task_thread_info(task);
	long ret = 0;

	if (is_compat_thread(ti))
		return -EINVAL;

	/*
	 * The mm context's pmlen is set only when the tagged address ABI is
	 * enabled, so the effective PMLEN must be extracted from envcfg.PMM.
	 */
	switch (task->thread.envcfg & ENVCFG_PMM) {
	case ENVCFG_PMM_PMLEN_7:
		ret = FIELD_PREP(PR_PMLEN_MASK, PMLEN_7);
		break;
	case ENVCFG_PMM_PMLEN_16:
		ret = FIELD_PREP(PR_PMLEN_MASK, PMLEN_16);
		break;
	}

	if (task->mm->context.pmlen)
		ret |= PR_TAGGED_ADDR_ENABLE;

	return ret;
}

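/*
 * Probe one candidate PMM encoding: set it, then read-and-clear the field.
 * Because envcfg.PMM is WARL, the write sticks only when the hart
 * implements that PMLEN; comparing the read-back value reveals support,
 * and the field is left cleared for the next probe.
 */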
static bool try_to_set_pmm(unsigned long value)
{
	csr_set(CSR_ENVCFG, value);
	return (csr_read_clear(CSR_ENVCFG, ENVCFG_PMM) & ENVCFG_PMM) == value;
}

/*
 * Global sysctl to disable the tagged user addresses support. This control
 * only prevents the tagged address ABI enabling via prctl() and does not
 * disable it for tasks that already opted in to the relaxed ABI.
 */

static struct ctl_table tagged_addr_sysctl_table[] = {
	{
		.procname	= "tagged_addr_disabled",
		.mode		= 0644,
		.data		= &tagged_addr_disabled,
		.maxlen		= sizeof(int),
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
};

static int __init tagged_addr_init(void)
{
	if (!riscv_has_extension_unlikely(RISCV_ISA_EXT_SUPM))
		return 0;

	/*
	 * envcfg.PMM is a WARL field. Detect which values are supported.
	 * Assume the supported PMLEN values are the same on all harts.
	 */
	csr_clear(CSR_ENVCFG, ENVCFG_PMM);
	have_user_pmlen_7 = try_to_set_pmm(ENVCFG_PMM_PMLEN_7);
	have_user_pmlen_16 = try_to_set_pmm(ENVCFG_PMM_PMLEN_16);

	if (!register_sysctl("abi", tagged_addr_sysctl_table))
		return -EINVAL;

	return 0;
}
core_initcall(tagged_addr_init);
#endif /* CONFIG_RISCV_ISA_SUPM */