/* arch/riscv/kernel/process.c — as shipped in Linux v6.2 (first copy below) */
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
  4 *  Chen Liqin <liqin.chen@sunplusct.com>
  5 *  Lennox Wu <lennox.wu@sunplusct.com>
  6 * Copyright (C) 2012 Regents of the University of California
  7 * Copyright (C) 2017 SiFive
  8 */
  9
 
 10#include <linux/cpu.h>
 11#include <linux/kernel.h>
 12#include <linux/sched.h>
 13#include <linux/sched/debug.h>
 14#include <linux/sched/task_stack.h>
 15#include <linux/tick.h>
 16#include <linux/ptrace.h>
 17#include <linux/uaccess.h>
 
 18
 19#include <asm/unistd.h>
 20#include <asm/processor.h>
 21#include <asm/csr.h>
 22#include <asm/stacktrace.h>
 23#include <asm/string.h>
 24#include <asm/switch_to.h>
 25#include <asm/thread_info.h>
 26#include <asm/cpuidle.h>
 27
/*
 * Bind the hart's global-pointer register so its value can be copied into
 * newly created kernel threads (see copy_thread()).
 */
register unsigned long gp_in_global __asm__("gp");
 
 29
/*
 * Global stack-protector canary.  Only needed when the per-task canary
 * (CONFIG_STACKPROTECTOR_PER_TASK) is not in use.
 */
#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif
 35
 36extern asmlinkage void ret_from_fork(void);
 37extern asmlinkage void ret_from_kernel_thread(void);
 38
/*
 * Architecture idle entry point.  cpu_do_idle() suspends the hart; on this
 * kernel version the generic idle loop expects arch_cpu_idle() to return
 * with interrupts enabled, hence the explicit raw_local_irq_enable().
 */
void arch_cpu_idle(void)
{
	cpu_do_idle();
	raw_local_irq_enable();
}
 44
/*
 * Dump the full pt_regs register set to the console.  For kernel-mode
 * faults, epc and ra are additionally printed symbolically (%pS) before
 * the raw hex dump so the faulting function is immediately visible.
 */
void __show_regs(struct pt_regs *regs)
{
	show_regs_print_info(KERN_DEFAULT);

	if (!user_mode(regs)) {
		pr_cont("epc : %pS\n", (void *)regs->epc);
		pr_cont(" ra : %pS\n", (void *)regs->ra);
	}

	pr_cont("epc : " REG_FMT " ra : " REG_FMT " sp : " REG_FMT "\n",
		regs->epc, regs->ra, regs->sp);
	pr_cont(" gp : " REG_FMT " tp : " REG_FMT " t0 : " REG_FMT "\n",
		regs->gp, regs->tp, regs->t0);
	pr_cont(" t1 : " REG_FMT " t2 : " REG_FMT " s0 : " REG_FMT "\n",
		regs->t1, regs->t2, regs->s0);
	pr_cont(" s1 : " REG_FMT " a0 : " REG_FMT " a1 : " REG_FMT "\n",
		regs->s1, regs->a0, regs->a1);
	pr_cont(" a2 : " REG_FMT " a3 : " REG_FMT " a4 : " REG_FMT "\n",
		regs->a2, regs->a3, regs->a4);
	pr_cont(" a5 : " REG_FMT " a6 : " REG_FMT " a7 : " REG_FMT "\n",
		regs->a5, regs->a6, regs->a7);
	pr_cont(" s2 : " REG_FMT " s3 : " REG_FMT " s4 : " REG_FMT "\n",
		regs->s2, regs->s3, regs->s4);
	pr_cont(" s5 : " REG_FMT " s6 : " REG_FMT " s7 : " REG_FMT "\n",
		regs->s5, regs->s6, regs->s7);
	pr_cont(" s8 : " REG_FMT " s9 : " REG_FMT " s10: " REG_FMT "\n",
		regs->s8, regs->s9, regs->s10);
	pr_cont(" s11: " REG_FMT " t3 : " REG_FMT " t4 : " REG_FMT "\n",
		regs->s11, regs->t3, regs->t4);
	pr_cont(" t5 : " REG_FMT " t6 : " REG_FMT "\n",
		regs->t5, regs->t6);

	pr_cont("status: " REG_FMT " badaddr: " REG_FMT " cause: " REG_FMT "\n",
		regs->status, regs->badaddr, regs->cause);
}
/*
 * Print registers and, for kernel-mode state, append a stack backtrace.
 * User-mode register dumps get no backtrace: the kernel stack is not
 * meaningful for the faulting user context.
 */
void show_regs(struct pt_regs *regs)
{
	__show_regs(regs);
	if (!user_mode(regs))
		dump_backtrace(regs, NULL, KERN_DEFAULT);
}
 86
 
 
 
 
 
 
 
 87#ifdef CONFIG_COMPAT
 88static bool compat_mode_supported __read_mostly;
 89
/*
 * Accept a 32-bit ELF image only if the hart supports UXL=32 execution
 * (detected at boot by compat_mode_detect()) and the header really is a
 * 32-bit RISC-V binary.
 */
bool compat_elf_check_arch(Elf32_Ehdr *hdr)
{
	return compat_mode_supported &&
	       hdr->e_machine == EM_RISCV &&
	       hdr->e_ident[EI_CLASS] == ELFCLASS32;
}
 96
 97static int __init compat_mode_detect(void)
 98{
 99	unsigned long tmp = csr_read(CSR_STATUS);
100
101	csr_write(CSR_STATUS, (tmp & ~SR_UXL) | SR_UXL_32);
102	compat_mode_supported =
103			(csr_read(CSR_STATUS) & SR_UXL) == SR_UXL_32;
104
105	csr_write(CSR_STATUS, tmp);
106
107	pr_info("riscv: ELF compat mode %s",
108			compat_mode_supported ? "supported" : "unsupported");
109
110	return 0;
111}
112early_initcall(compat_mode_detect);
113#endif
114
/*
 * Set up user register state for exec(): entry point, stack pointer and a
 * clean status CSR.  SR_PIE arranges for interrupts to be enabled on the
 * return to user mode.
 */
void start_thread(struct pt_regs *regs, unsigned long pc,
	unsigned long sp)
{
	regs->status = SR_PIE;
	if (has_fpu()) {
		regs->status |= SR_FS_INITIAL;
		/*
		 * Restore the initial value to the FP register
		 * before starting the user program.
		 */
		fstate_restore(current, regs);
	}
	regs->epc = pc;
	regs->sp = sp;

#ifdef CONFIG_64BIT
	/* Select the user XLEN: UXL=32 for compat tasks, UXL=64 otherwise. */
	regs->status &= ~SR_UXL;

	if (is_compat_task())
		regs->status |= SR_UXL_32;
	else
		regs->status |= SR_UXL_64;
#endif
}
139
/*
 * Reset the current task's architectural thread state on exec().
 */
void flush_thread(void)
{
#ifdef CONFIG_FPU
	/*
	 * Reset FPU state and context
	 *	frm: round to nearest, ties to even (IEEE default)
	 *	fflags: accrued exceptions cleared
	 */
	fstate_off(current, task_pt_regs(current));
	memset(&current->thread.fstate, 0, sizeof(current->thread.fstate));
#endif
}
152
/*
 * Arch hook for fork: flush the parent's live FP state into its
 * task_struct first, so the plain structure copy below captures an
 * up-to-date FP context for the child.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	fstate_save(src, task_pt_regs(src));
	*dst = *src;
	return 0;
}
159
/*
 * Set up the child's saved context and pt_regs for clone()/fork()/
 * kernel_thread().  p->thread holds the callee-saved state that
 * __switch_to() restores when the child is first scheduled.
 */
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
	unsigned long clone_flags = args->flags;
	unsigned long usp = args->stack;
	unsigned long tls = args->tls;
	struct pt_regs *childregs = task_pt_regs(p);

	/* Start from clean callee-saved registers. */
	memset(&p->thread.s, 0, sizeof(p->thread.s));

	/* p->thread holds context to be restored by __switch_to() */
	if (unlikely(args->fn)) {
		/* Kernel thread */
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->gp = gp_in_global;
		/* Supervisor/Machine, irqs on: */
		childregs->status = SR_PP | SR_PIE;

		/* ret_from_kernel_thread() invokes s[0] with argument s[1]. */
		p->thread.ra = (unsigned long)ret_from_kernel_thread;
		p->thread.s[0] = (unsigned long)args->fn;
		p->thread.s[1] = (unsigned long)args->fn_arg;
	} else {
		/* User task: child starts from a copy of the parent's regs. */
		*childregs = *(current_pt_regs());
		if (usp) /* User fork */
			childregs->sp = usp;
		if (clone_flags & CLONE_SETTLS)
			childregs->tp = tls;
		childregs->a0 = 0; /* Return value of fork() */
		p->thread.ra = (unsigned long)ret_from_fork;
	}
	p->thread.sp = (unsigned long)childregs; /* kernel sp */
	return 0;
}
/* arch/riscv/kernel/process.c — as shipped in Linux v6.13.7 (second copy below) */
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
  4 *  Chen Liqin <liqin.chen@sunplusct.com>
  5 *  Lennox Wu <lennox.wu@sunplusct.com>
  6 * Copyright (C) 2012 Regents of the University of California
  7 * Copyright (C) 2017 SiFive
  8 */
  9
 10#include <linux/bitfield.h>
 11#include <linux/cpu.h>
 12#include <linux/kernel.h>
 13#include <linux/sched.h>
 14#include <linux/sched/debug.h>
 15#include <linux/sched/task_stack.h>
 16#include <linux/tick.h>
 17#include <linux/ptrace.h>
 18#include <linux/uaccess.h>
 19#include <linux/personality.h>
 20
 21#include <asm/unistd.h>
 22#include <asm/processor.h>
 23#include <asm/csr.h>
 24#include <asm/stacktrace.h>
 25#include <asm/string.h>
 26#include <asm/switch_to.h>
 27#include <asm/thread_info.h>
 28#include <asm/cpuidle.h>
 29#include <asm/vector.h>
 30#include <asm/cpufeature.h>
 31#include <asm/exec.h>
 32
 33#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
 34#include <linux/stackprotector.h>
 35unsigned long __stack_chk_guard __read_mostly;
 36EXPORT_SYMBOL(__stack_chk_guard);
 37#endif
 38
 39extern asmlinkage void ret_from_fork(void);
 
 40
/*
 * Architecture idle entry point.  noinstr: runs with RCU/tracing in the
 * idle state, so no instrumentable code may be called here.  Unlike the
 * v6.2 variant, IRQ re-enabling is handled by the generic idle loop.
 */
void noinstr arch_cpu_idle(void)
{
	cpu_do_idle();
}
 45
/*
 * prctl(PR_SET_UNALIGN) backend: record the task's unaligned-access
 * control word.  Fails with -EINVAL when the platform offers no
 * unaligned-access control.
 */
int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
	if (!unaligned_ctl_available())
		return -EINVAL;

	tsk->thread.align_ctl = val;
	return 0;
}
 54
/*
 * prctl(PR_GET_UNALIGN) backend: copy the task's unaligned-access control
 * word to the user address @adr.  Returns put_user()'s fault result, or
 * -EINVAL when the control is unavailable.
 */
int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
	if (!unaligned_ctl_available())
		return -EINVAL;

	return put_user(tsk->thread.align_ctl, (unsigned long __user *)adr);
}
 62
/*
 * Dump the full pt_regs register set to the console.  For kernel-mode
 * faults, epc and ra are additionally printed symbolically (%pS) before
 * the raw hex dump so the faulting function is immediately visible.
 */
void __show_regs(struct pt_regs *regs)
{
	show_regs_print_info(KERN_DEFAULT);

	if (!user_mode(regs)) {
		pr_cont("epc : %pS\n", (void *)regs->epc);
		pr_cont(" ra : %pS\n", (void *)regs->ra);
	}

	pr_cont("epc : " REG_FMT " ra : " REG_FMT " sp : " REG_FMT "\n",
		regs->epc, regs->ra, regs->sp);
	pr_cont(" gp : " REG_FMT " tp : " REG_FMT " t0 : " REG_FMT "\n",
		regs->gp, regs->tp, regs->t0);
	pr_cont(" t1 : " REG_FMT " t2 : " REG_FMT " s0 : " REG_FMT "\n",
		regs->t1, regs->t2, regs->s0);
	pr_cont(" s1 : " REG_FMT " a0 : " REG_FMT " a1 : " REG_FMT "\n",
		regs->s1, regs->a0, regs->a1);
	pr_cont(" a2 : " REG_FMT " a3 : " REG_FMT " a4 : " REG_FMT "\n",
		regs->a2, regs->a3, regs->a4);
	pr_cont(" a5 : " REG_FMT " a6 : " REG_FMT " a7 : " REG_FMT "\n",
		regs->a5, regs->a6, regs->a7);
	pr_cont(" s2 : " REG_FMT " s3 : " REG_FMT " s4 : " REG_FMT "\n",
		regs->s2, regs->s3, regs->s4);
	pr_cont(" s5 : " REG_FMT " s6 : " REG_FMT " s7 : " REG_FMT "\n",
		regs->s5, regs->s6, regs->s7);
	pr_cont(" s8 : " REG_FMT " s9 : " REG_FMT " s10: " REG_FMT "\n",
		regs->s8, regs->s9, regs->s10);
	pr_cont(" s11: " REG_FMT " t3 : " REG_FMT " t4 : " REG_FMT "\n",
		regs->s11, regs->t3, regs->t4);
	pr_cont(" t5 : " REG_FMT " t6 : " REG_FMT "\n",
		regs->t5, regs->t6);

	pr_cont("status: " REG_FMT " badaddr: " REG_FMT " cause: " REG_FMT "\n",
		regs->status, regs->badaddr, regs->cause);
}
/*
 * Print registers and, for kernel-mode state, append a stack backtrace.
 */
void show_regs(struct pt_regs *regs)
{
	__show_regs(regs);
	if (!user_mode(regs))
		dump_backtrace(regs, NULL, KERN_DEFAULT);
}
104
/*
 * Randomize the initial user stack pointer (unless the task opted out via
 * ADDR_NO_RANDOMIZE or randomize_va_space is off), then round down to a
 * 16-byte boundary as required by the RISC-V psABI.
 */
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_u32_below(PAGE_SIZE);
	return sp & ~0xf;
}
111
112#ifdef CONFIG_COMPAT
113static bool compat_mode_supported __read_mostly;
114
/*
 * Accept a 32-bit ELF image only if the hart supports UXL=32 execution
 * (detected at boot by compat_mode_detect()) and the header really is a
 * 32-bit RISC-V binary.
 */
bool compat_elf_check_arch(Elf32_Ehdr *hdr)
{
	return compat_mode_supported &&
	       hdr->e_machine == EM_RISCV &&
	       hdr->e_ident[EI_CLASS] == ELFCLASS32;
}
121
122static int __init compat_mode_detect(void)
123{
124	unsigned long tmp = csr_read(CSR_STATUS);
125
126	csr_write(CSR_STATUS, (tmp & ~SR_UXL) | SR_UXL_32);
127	compat_mode_supported =
128			(csr_read(CSR_STATUS) & SR_UXL) == SR_UXL_32;
129
130	csr_write(CSR_STATUS, tmp);
131
132	pr_info("riscv: ELF compat mode %s",
133			compat_mode_supported ? "supported" : "unsupported");
134
135	return 0;
136}
137early_initcall(compat_mode_detect);
138#endif
139
/*
 * Set up user register state for exec(): entry point, stack pointer and a
 * clean status CSR.  SR_PIE arranges for interrupts to be enabled on the
 * return to user mode.
 */
void start_thread(struct pt_regs *regs, unsigned long pc,
	unsigned long sp)
{
	regs->status = SR_PIE;
	if (has_fpu()) {
		regs->status |= SR_FS_INITIAL;
		/*
		 * Restore the initial value to the FP register
		 * before starting the user program.
		 */
		fstate_restore(current, regs);
	}
	regs->epc = pc;
	regs->sp = sp;

#ifdef CONFIG_64BIT
	/* Select the user XLEN: UXL=32 for compat tasks, UXL=64 otherwise. */
	regs->status &= ~SR_UXL;

	if (is_compat_task())
		regs->status |= SR_UXL_32;
	else
		regs->status |= SR_UXL_64;
#endif
}
164
/*
 * Reset the current task's architectural thread state on exec(): FP
 * context, vector context (including the heap-allocated datap buffer) and
 * the Supm pointer-masking configuration.
 */
void flush_thread(void)
{
#ifdef CONFIG_FPU
	/*
	 * Reset FPU state and context
	 *	frm: round to nearest, ties to even (IEEE default)
	 *	fflags: accrued exceptions cleared
	 */
	fstate_off(current, task_pt_regs(current));
	memset(&current->thread.fstate, 0, sizeof(current->thread.fstate));
#endif
#ifdef CONFIG_RISCV_ISA_V
	/* Reset vector state */
	riscv_v_vstate_ctrl_init(current);
	riscv_v_vstate_off(task_pt_regs(current));
	kfree(current->thread.vstate.datap);
	memset(&current->thread.vstate, 0, sizeof(struct __riscv_v_ext_state));
	clear_tsk_thread_flag(current, TIF_RISCV_V_DEFER_RESTORE);
#endif
#ifdef CONFIG_RISCV_ISA_SUPM
	/* Disable pointer masking for the new image (PMLEN = 0). */
	if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SUPM))
		envcfg_update_bits(current, ENVCFG_PMM, ENVCFG_PMM_PMLEN_0);
#endif
}
189
/*
 * Final per-arch teardown of a task_struct: release the vector context
 * buffer allocated in copy_thread()/riscv_v_thread_alloc().
 */
void arch_release_task_struct(struct task_struct *tsk)
{
	/* Free the vector context of datap. */
	if (has_vector())
		riscv_v_thread_free(tsk);
}
196
/*
 * Arch hook for fork: flush the parent's live FP state into its
 * task_struct so the structure copy captures an up-to-date FP context,
 * then clear the child's vector contexts — the child must not share the
 * parent's heap-allocated datap buffers.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	fstate_save(src, task_pt_regs(src));
	*dst = *src;
	/* clear entire V context, including datap for a new task */
	memset(&dst->thread.vstate, 0, sizeof(struct __riscv_v_ext_state));
	memset(&dst->thread.kernel_vstate, 0, sizeof(struct __riscv_v_ext_state));
	clear_tsk_thread_flag(dst, TIF_RISCV_V_DEFER_RESTORE);

	return 0;
}
208
/*
 * Set up the child's saved context and pt_regs for clone()/fork()/
 * kernel_thread().  p->thread holds the callee-saved state that
 * __switch_to() restores when the child is first scheduled; both kernel
 * and user children now enter through ret_from_fork(), which dispatches
 * on p->thread.s[0] (non-zero => kernel-thread function to call).
 */
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
	unsigned long clone_flags = args->flags;
	unsigned long usp = args->stack;
	unsigned long tls = args->tls;
	struct pt_regs *childregs = task_pt_regs(p);

	/* Ensure all threads in this mm have the same pointer masking mode. */
	if (IS_ENABLED(CONFIG_RISCV_ISA_SUPM) && p->mm && (clone_flags & CLONE_VM))
		set_bit(MM_CONTEXT_LOCK_PMLEN, &p->mm->context.flags);

	memset(&p->thread.s, 0, sizeof(p->thread.s));

	/* p->thread holds context to be restored by __switch_to() */
	if (unlikely(args->fn)) {
		/* Kernel thread */
		memset(childregs, 0, sizeof(struct pt_regs));
		/* Supervisor/Machine, irqs on: */
		childregs->status = SR_PP | SR_PIE;

		/* ret_from_fork() calls s[0] with argument s[1]. */
		p->thread.s[0] = (unsigned long)args->fn;
		p->thread.s[1] = (unsigned long)args->fn_arg;
	} else {
		/* User task: child starts from a copy of the parent's regs. */
		*childregs = *(current_pt_regs());
		/* Turn off status.VS */
		riscv_v_vstate_off(childregs);
		if (usp) /* User fork */
			childregs->sp = usp;
		if (clone_flags & CLONE_SETTLS)
			childregs->tp = tls;
		childregs->a0 = 0; /* Return value of fork() */
		p->thread.s[0] = 0;	/* marks a user child for ret_from_fork() */
	}
	p->thread.riscv_v_flags = 0;
	if (has_vector())
		riscv_v_thread_alloc(p);
	p->thread.ra = (unsigned long)ret_from_fork;
	p->thread.sp = (unsigned long)childregs; /* kernel sp */
	return 0;
}
249
/*
 * Boot-time hook: create the kmem cache used for per-task vector
 * contexts.
 */
void __init arch_task_cache_init(void)
{
	riscv_v_setup_ctx_cache();
}
254
255#ifdef CONFIG_RISCV_ISA_SUPM
/* Pointer-masking lengths selectable via prctl (number of masked high bits). */
enum {
	PMLEN_0 = 0,
	PMLEN_7 = 7,
	PMLEN_16 = 16,
};

/* Which WARL-probed PMLEN values this hardware supports (set at boot). */
static bool have_user_pmlen_7;
static bool have_user_pmlen_16;

/*
 * Control the relaxed ABI allowing tagged user addresses into the kernel.
 */
static unsigned int tagged_addr_disabled;
269
/*
 * prctl(PR_SET_TAGGED_ADDR_CTRL) backend: select a pointer-masking length
 * and optionally enable the tagged-address ABI for the task's mm.
 * Returns -EINVAL on bad arguments or unsupported PMLEN, -EBUSY when the
 * mm's PMLEN is locked (multi-threaded mm) to a different value, -EINTR
 * if interrupted while taking mmap_lock.
 */
long set_tagged_addr_ctrl(struct task_struct *task, unsigned long arg)
{
	unsigned long valid_mask = PR_PMLEN_MASK | PR_TAGGED_ADDR_ENABLE;
	struct thread_info *ti = task_thread_info(task);
	struct mm_struct *mm = task->mm;
	unsigned long pmm;
	u8 pmlen;

	if (is_compat_thread(ti))
		return -EINVAL;

	if (arg & ~valid_mask)
		return -EINVAL;

	/*
	 * Prefer the smallest PMLEN that satisfies the user's request,
	 * in case choosing a larger PMLEN has a performance impact.
	 */
	pmlen = FIELD_GET(PR_PMLEN_MASK, arg);
	if (pmlen == PMLEN_0) {
		pmm = ENVCFG_PMM_PMLEN_0;
	} else if (pmlen <= PMLEN_7 && have_user_pmlen_7) {
		pmlen = PMLEN_7;
		pmm = ENVCFG_PMM_PMLEN_7;
	} else if (pmlen <= PMLEN_16 && have_user_pmlen_16) {
		pmlen = PMLEN_16;
		pmm = ENVCFG_PMM_PMLEN_16;
	} else {
		return -EINVAL;
	}

	/*
	 * Do not allow the enabling of the tagged address ABI if it is
	 * globally disabled via sysctl abi.tagged_addr_disabled, or if
	 * pointer masking is disabled for userspace (pmlen == 0).
	 */
	if (arg & PR_TAGGED_ADDR_ENABLE && (tagged_addr_disabled || !pmlen))
		return -EINVAL;

	if (!(arg & PR_TAGGED_ADDR_ENABLE))
		pmlen = PMLEN_0;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	/* A locked mm (shared by threads) may not change its PMLEN. */
	if (test_bit(MM_CONTEXT_LOCK_PMLEN, &mm->context.flags) && mm->context.pmlen != pmlen) {
		mmap_write_unlock(mm);
		return -EBUSY;
	}

	envcfg_update_bits(task, ENVCFG_PMM, pmm);
	mm->context.pmlen = pmlen;

	mmap_write_unlock(mm);

	return 0;
}
327
/*
 * prctl(PR_GET_TAGGED_ADDR_CTRL) backend: report the task's effective
 * pointer-masking length (from envcfg.PMM) and whether the tagged-address
 * ABI is enabled for its mm.  -EINVAL for compat threads.
 */
long get_tagged_addr_ctrl(struct task_struct *task)
{
	struct thread_info *ti = task_thread_info(task);
	long ret = 0;

	if (is_compat_thread(ti))
		return -EINVAL;

	/*
	 * The mm context's pmlen is set only when the tagged address ABI is
	 * enabled, so the effective PMLEN must be extracted from envcfg.PMM.
	 */
	switch (task->thread.envcfg & ENVCFG_PMM) {
	case ENVCFG_PMM_PMLEN_7:
		ret = FIELD_PREP(PR_PMLEN_MASK, PMLEN_7);
		break;
	case ENVCFG_PMM_PMLEN_16:
		ret = FIELD_PREP(PR_PMLEN_MASK, PMLEN_16);
		break;
	}

	if (task->mm->context.pmlen)
		ret |= PR_TAGGED_ADDR_ENABLE;

	return ret;
}
354
/*
 * WARL probe: try to set envcfg.PMM to @value and report whether it
 * stuck.  csr_read_clear() both reads the result and clears the field,
 * leaving PMM zeroed for the next probe.
 */
static bool try_to_set_pmm(unsigned long value)
{
	csr_set(CSR_ENVCFG, value);
	return (csr_read_clear(CSR_ENVCFG, ENVCFG_PMM) & ENVCFG_PMM) == value;
}
360
/*
 * Global sysctl to disable the tagged user addresses support. This control
 * only prevents the tagged address ABI enabling via prctl() and does not
 * disable it for tasks that already opted in to the relaxed ABI.
 */

static struct ctl_table tagged_addr_sysctl_table[] = {
	{
		.procname	= "tagged_addr_disabled",
		.mode		= 0644,
		.data		= &tagged_addr_disabled,
		.maxlen		= sizeof(int),
		.proc_handler	= proc_dointvec_minmax,
		/* Boolean: clamp writes to the range [0, 1]. */
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
};
378
/*
 * Boot-time setup for Supm pointer masking: probe which PMLEN values the
 * hardware accepts and register the abi.tagged_addr_disabled sysctl.
 */
static int __init tagged_addr_init(void)
{
	if (!riscv_has_extension_unlikely(RISCV_ISA_EXT_SUPM))
		return 0;

	/*
	 * envcfg.PMM is a WARL field. Detect which values are supported.
	 * Assume the supported PMLEN values are the same on all harts.
	 */
	csr_clear(CSR_ENVCFG, ENVCFG_PMM);
	have_user_pmlen_7 = try_to_set_pmm(ENVCFG_PMM_PMLEN_7);
	have_user_pmlen_16 = try_to_set_pmm(ENVCFG_PMM_PMLEN_16);

	if (!register_sysctl("abi", tagged_addr_sysctl_table))
		return -EINVAL;

	return 0;
}
core_initcall(tagged_addr_init);
398#endif	/* CONFIG_RISCV_ISA_SUPM */