v6.2
// SPDX-License-Identifier: GPL-2.0
/*
 * This file handles the architecture dependent parts of process handling.
 *
 *    Copyright IBM Corp. 1999, 2009
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *		 Hartmut Penner <hp@de.ibm.com>,
 *		 Denis Joseph Barrow,
 */

#include <linux/elf-randomize.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/tick.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/kprobes.h>
#include <linux/random.h>
#include <linux/export.h>
#include <linux/init_task.h>
#include <linux/entry-common.h>
#include <asm/cpu_mf.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/vtimer.h>
#include <asm/exec.h>
#include <asm/irq.h>
#include <asm/nmi.h>
#include <asm/smp.h>
#include <asm/stacktrace.h>
#include <asm/switch_to.h>
#include <asm/runtime_instr.h>
#include <asm/unwind.h>
#include "entry.h"

void ret_from_fork(void) asm("ret_from_fork");

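/*
 * Editorial note: __ret_from_fork() is the C continuation of the
 * ret_from_fork assembly stub declared above; every newly created task
 * starts executing here the first time it is scheduled. Kernel threads
 * call the thread function that copy_thread() parked in gprs[9]/gprs[10];
 * user space children fall through to the syscall exit path and return 0
 * from fork()/clone().
 */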
void __ret_from_fork(struct task_struct *prev, struct pt_regs *regs)
{
	void (*func)(void *arg);

	schedule_tail(prev);

	if (!user_mode(regs)) {
		/* Kernel thread */
		func = (void *)regs->gprs[9];
		func((void *)regs->gprs[10]);
	}
	clear_pt_regs_flag(regs, PIF_SYSCALL);
	syscall_exit_to_user_mode(regs);
}

void flush_thread(void)
{
}

void arch_setup_new_exec(void)
{
	if (S390_lowcore.current_pid != current->pid) {
		S390_lowcore.current_pid = current->pid;
		if (test_facility(40))
			lpp(&S390_lowcore.lpp);
	}
}

void arch_release_task_struct(struct task_struct *tsk)
{
	runtime_instr_release(tsk);
	guarded_storage_release(tsk);
}

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
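	/*
	 * Editorial note: after the memcpy below, dst's fpu.regs pointer
	 * still refers to the source task's save area, so it must be
	 * re-pointed at dst's own fprs.
	 */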
	/*
	 * Save the floating-point or vector register state of the current
	 * task and set the CIF_FPU flag to lazy restore the FPU register
	 * state when returning to user space.
	 */
	save_fpu_regs();

	memcpy(dst, src, arch_task_struct_size);
	dst->thread.fpu.regs = dst->thread.fpu.fprs;

	/*
	 * Don't transfer over the runtime instrumentation or the guarded
	 * storage control block pointers. These fields are cleared here instead
	 * of in copy_thread() to avoid premature freeing of associated memory
	 * on fork() failure. Wait to clear the RI flag because ->stack still
	 * refers to the source thread.
	 */
	dst->thread.ri_cb = NULL;
	dst->thread.gs_cb = NULL;
	dst->thread.gs_bc_cb = NULL;

	return 0;
}

int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
	unsigned long clone_flags = args->flags;
	unsigned long new_stackp = args->stack;
	unsigned long tls = args->tls;
	struct fake_frame
	{
		struct stack_frame sf;
		struct pt_regs childregs;
	} *frame;

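	/*
	 * Editorial note: task_pt_regs(p) points at the pt_regs area at the
	 * top of the new task's kernel stack; placing a struct stack_frame
	 * immediately below it gives the scheduler a plausible frame to
	 * "return" through, so the first context switch into this task lands
	 * in ret_from_fork.
	 */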
	frame = container_of(task_pt_regs(p), struct fake_frame, childregs);
	p->thread.ksp = (unsigned long) frame;
	/* Save access registers to new thread structure. */
	save_access_regs(&p->thread.acrs[0]);
	/* start new process with ar4 pointing to the correct address space */
	/* Don't copy debug registers */
	memset(&p->thread.per_user, 0, sizeof(p->thread.per_user));
	memset(&p->thread.per_event, 0, sizeof(p->thread.per_event));
	clear_tsk_thread_flag(p, TIF_SINGLE_STEP);
	p->thread.per_flags = 0;
	/* Initialize per thread user and system timer values */
	p->thread.user_timer = 0;
	p->thread.guest_timer = 0;
	p->thread.system_timer = 0;
	p->thread.hardirq_timer = 0;
	p->thread.softirq_timer = 0;
	p->thread.last_break = 1;

	frame->sf.back_chain = 0;
	frame->sf.gprs[5] = (unsigned long)frame + sizeof(struct stack_frame);
	frame->sf.gprs[6] = (unsigned long)p;
	/* new return point is ret_from_fork */
	frame->sf.gprs[8] = (unsigned long)ret_from_fork;
	/* fake return stack for resume(), don't go back to schedule */
	frame->sf.gprs[9] = (unsigned long)frame;

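	/*
	 * Editorial note: args->fn is only set for kernel threads; stashing
	 * the function and its argument in gprs[9]/gprs[10] hands them to
	 * __ret_from_fork() above.
	 */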
	/* Store access registers to kernel stack of new process. */
	if (unlikely(args->fn)) {
		/* kernel thread */
		memset(&frame->childregs, 0, sizeof(struct pt_regs));
		frame->childregs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT |
				PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
		frame->childregs.psw.addr =
				(unsigned long)__ret_from_fork;
		frame->childregs.gprs[9] = (unsigned long)args->fn;
		frame->childregs.gprs[10] = (unsigned long)args->fn_arg;
		frame->childregs.orig_gpr2 = -1;
		frame->childregs.last_break = 1;
		return 0;
	}
	frame->childregs = *current_pt_regs();
	frame->childregs.gprs[2] = 0;	/* child returns 0 on fork. */
	frame->childregs.flags = 0;
	if (new_stackp)
		frame->childregs.gprs[15] = new_stackp;
	/*
	 * Clear the runtime instrumentation flag after the above childregs
	 * copy. The CB pointer was already cleared in arch_dup_task_struct().
	 */
	frame->childregs.psw.mask &= ~PSW_MASK_RI;

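	/*
	 * Editorial note: on s390 the TLS pointer is kept in access
	 * registers. 64-bit tasks split it across a0 (high word) and a1
	 * (low word); compat tasks use a0 alone.
	 */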
	/* Set a new TLS ?  */
	if (clone_flags & CLONE_SETTLS) {
		if (is_compat_task()) {
			p->thread.acrs[0] = (unsigned int)tls;
		} else {
			p->thread.acrs[0] = (unsigned int)(tls >> 32);
			p->thread.acrs[1] = (unsigned int)tls;
		}
	}
	/*
	 * s390 stores the svc return address in arch_data when calling
	 * sigreturn()/restart_syscall() via vdso. 1 means no valid address
	 * stored.
	 */
	p->restart_block.arch_data = 1;
	return 0;
}

void execve_tail(void)
{
	current->thread.fpu.fpc = 0;
	asm volatile("sfpc %0" : : "d" (0));
}

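/*
 * Editorial note: __get_wchan() reports the "wait channel": the first
 * return address outside the scheduler on a blocked task's kernel stack,
 * found by walking its frames with the stack unwinder.
 */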
unsigned long __get_wchan(struct task_struct *p)
{
	struct unwind_state state;
	unsigned long ip = 0;

	if (!task_stack_page(p))
		return 0;

	if (!try_get_task_stack(p))
		return 0;

	unwind_for_each_frame(&state, p, NULL, 0) {
		if (state.stack_info.type != STACK_TYPE_TASK) {
			ip = 0;
			break;
		}

		ip = unwind_get_return_address(&state);
		if (!ip)
			break;

		if (!in_sched_functions(ip))
			break;
	}

	put_task_stack(p);
	return ip;
}

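/*
 * Editorial note: the three functions below randomize the initial user
 * stack pointer by up to one page (rounding down to a 16-byte boundary)
 * and randomize the heap start, never moving it below the current brk.
 */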
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_u32_below(PAGE_SIZE);
	return sp & ~0xf;
}

static inline unsigned long brk_rnd(void)
{
	return (get_random_u16() & BRK_RND_MASK) << PAGE_SHIFT;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long ret;

	ret = PAGE_ALIGN(mm->brk + brk_rnd());
	return (ret > mm->brk) ? ret : mm->brk;
}
v4.6
 
/*
 * This file handles the architecture dependent parts of process handling.
 *
 *    Copyright IBM Corp. 1999, 2009
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *		 Hartmut Penner <hp@de.ibm.com>,
 *		 Denis Joseph Barrow,
 */

#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/tick.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/kprobes.h>
#include <linux/random.h>
#include <linux/module.h>
#include <linux/init_task.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/vtimer.h>
#include <asm/exec.h>
#include <asm/irq.h>
#include <asm/nmi.h>
#include <asm/smp.h>
#include <asm/switch_to.h>
#include <asm/runtime_instr.h>
#include "entry.h"

asmlinkage void ret_from_fork(void) asm ("ret_from_fork");

/* FPU save area for the init task */
__vector128 init_task_fpu_regs[__NUM_VXRS] __init_task_data;

/*
 * Return the saved PC of a blocked thread. Used in kernel/sched.
 * resume in entry.S does not create a new stack frame, it
 * just stores the registers %r6-%r15 to the frame given by
 * schedule. We want to return the address of the caller of
 * schedule, so we have to walk the backchain one time to
 * find the frame where schedule() stored its return address.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	struct stack_frame *sf, *low, *high;

	if (!tsk || !task_stack_page(tsk))
		return 0;
	low = task_stack_page(tsk);
	high = (struct stack_frame *) task_pt_regs(tsk);
	sf = (struct stack_frame *) tsk->thread.ksp;
	if (sf <= low || sf > high)
		return 0;
	sf = (struct stack_frame *) sf->back_chain;
	if (sf <= low || sf > high)
		return 0;
	return sf->gprs[8];
}

extern void kernel_thread_starter(void);

/*
 * Free current thread data structures etc.
 */
void exit_thread(void)
{
	exit_thread_runtime_instr();
}

void flush_thread(void)
{
}

void release_thread(struct task_struct *dead_task)
{
}

void arch_release_task_struct(struct task_struct *tsk)
{
	/* Free either the floating-point or the vector register save area */
	kfree(tsk->thread.fpu.regs);
}

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	size_t fpu_regs_size;

	*dst = *src;

	/*
	 * If the vector extension is available, it is enabled for all tasks,
	 * and, thus, the FPU register save area must be allocated accordingly.
	 */
	fpu_regs_size = MACHINE_HAS_VX ? sizeof(__vector128) * __NUM_VXRS
				       : sizeof(freg_t) * __NUM_FPRS;
	dst->thread.fpu.regs = kzalloc(fpu_regs_size, GFP_KERNEL|__GFP_REPEAT);
	if (!dst->thread.fpu.regs)
		return -ENOMEM;

	/*
	 * Save the floating-point or vector register state of the current
	 * task and set the CIF_FPU flag to lazy restore the FPU register
	 * state when returning to user space.
	 */
	save_fpu_regs();
	dst->thread.fpu.fpc = current->thread.fpu.fpc;
	memcpy(dst->thread.fpu.regs, current->thread.fpu.regs, fpu_regs_size);

	return 0;
}

int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
		unsigned long arg, struct task_struct *p)
{
	struct thread_info *ti;
	struct fake_frame
	{
		struct stack_frame sf;
		struct pt_regs childregs;
	} *frame;

	frame = container_of(task_pt_regs(p), struct fake_frame, childregs);
	p->thread.ksp = (unsigned long) frame;
	/* Save access registers to new thread structure. */
	save_access_regs(&p->thread.acrs[0]);
	/* start new process with ar4 pointing to the correct address space */
	p->thread.mm_segment = get_fs();
	/* Don't copy debug registers */
	memset(&p->thread.per_user, 0, sizeof(p->thread.per_user));
	memset(&p->thread.per_event, 0, sizeof(p->thread.per_event));
	clear_tsk_thread_flag(p, TIF_SINGLE_STEP);
	/* Initialize per thread user and system timer values */
	ti = task_thread_info(p);
	ti->user_timer = 0;
	ti->system_timer = 0;

	frame->sf.back_chain = 0;
	/* new return point is ret_from_fork */
	frame->sf.gprs[8] = (unsigned long) ret_from_fork;
	/* fake return stack for resume(), don't go back to schedule */
	frame->sf.gprs[9] = (unsigned long) frame;

	/* Store access registers to kernel stack of new process. */
	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		memset(&frame->childregs, 0, sizeof(struct pt_regs));
		frame->childregs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT |
				PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
		frame->childregs.psw.addr =
				(unsigned long) kernel_thread_starter;
		frame->childregs.gprs[9] = new_stackp; /* function */
		frame->childregs.gprs[10] = arg;
		frame->childregs.gprs[11] = (unsigned long) do_exit;
		frame->childregs.orig_gpr2 = -1;

		return 0;
	}
	frame->childregs = *current_pt_regs();
	frame->childregs.gprs[2] = 0;	/* child returns 0 on fork. */
	frame->childregs.flags = 0;
	if (new_stackp)
		frame->childregs.gprs[15] = new_stackp;

	/* Don't copy runtime instrumentation info */
	p->thread.ri_cb = NULL;
	frame->childregs.psw.mask &= ~PSW_MASK_RI;

	/* Set a new TLS ?  */
	if (clone_flags & CLONE_SETTLS) {
		unsigned long tls = frame->childregs.gprs[6];
		if (is_compat_task()) {
			p->thread.acrs[0] = (unsigned int)tls;
		} else {
			p->thread.acrs[0] = (unsigned int)(tls >> 32);
			p->thread.acrs[1] = (unsigned int)tls;
		}
	}
	return 0;
}

asmlinkage void execve_tail(void)
{
	current->thread.fpu.fpc = 0;
	asm volatile("sfpc %0" : : "d" (0));
}

/*
 * fill in the FPU structure for a core dump.
 */
int dump_fpu(struct pt_regs *regs, s390_fp_regs *fpregs)
{
	save_fpu_regs();
	fpregs->fpc = current->thread.fpu.fpc;
	fpregs->pad = 0;
	if (MACHINE_HAS_VX)
		convert_vx_to_fp((freg_t *)&fpregs->fprs,
				 current->thread.fpu.vxrs);
	else
		memcpy(&fpregs->fprs, current->thread.fpu.fprs,
		       sizeof(fpregs->fprs));
	return 1;
}
EXPORT_SYMBOL(dump_fpu);

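/*
 * Editorial note: unlike the unwinder-based __get_wchan() in the v6.2
 * listing above, this version walks the task's backchain by hand, giving
 * up after 16 frames, and returns the first return address outside the
 * scheduler.
 */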
unsigned long get_wchan(struct task_struct *p)
{
	struct stack_frame *sf, *low, *high;
	unsigned long return_address;
	int count;

	if (!p || p == current || p->state == TASK_RUNNING || !task_stack_page(p))
		return 0;
	low = task_stack_page(p);
	high = (struct stack_frame *) task_pt_regs(p);
	sf = (struct stack_frame *) p->thread.ksp;
	if (sf <= low || sf > high)
		return 0;
	for (count = 0; count < 16; count++) {
		sf = (struct stack_frame *) sf->back_chain;
		if (sf <= low || sf > high)
			return 0;
		return_address = sf->gprs[8];
		if (!in_sched_functions(return_address))
			return return_address;
233	}
234	return 0;
}

unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;
	return sp & ~0xf;
}

static inline unsigned long brk_rnd(void)
{
	return (get_random_int() & BRK_RND_MASK) << PAGE_SHIFT;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long ret;

	ret = PAGE_ALIGN(mm->brk + brk_rnd());
	return (ret > mm->brk) ? ret : mm->brk;
}