v5.4
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * This file handles the architecture dependent parts of process handling.
  4 *
  5 *    Copyright IBM Corp. 1999, 2009
  6 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
  7 *		 Hartmut Penner <hp@de.ibm.com>,
  8 *		 Denis Joseph Barrow,
  9 */
 10
 11#include <linux/elf-randomize.h>
 12#include <linux/compiler.h>
 13#include <linux/cpu.h>
 14#include <linux/sched.h>
 15#include <linux/sched/debug.h>
 16#include <linux/sched/task.h>
 17#include <linux/sched/task_stack.h>
 18#include <linux/kernel.h>
 19#include <linux/mm.h>
 20#include <linux/elfcore.h>
 21#include <linux/smp.h>
 22#include <linux/slab.h>
 23#include <linux/interrupt.h>
 24#include <linux/tick.h>
 25#include <linux/personality.h>
 26#include <linux/syscalls.h>
 27#include <linux/compat.h>
 28#include <linux/kprobes.h>
 29#include <linux/random.h>
 30#include <linux/export.h>
 31#include <linux/init_task.h>
 32#include <asm/cpu_mf.h>
 33#include <asm/io.h>
 34#include <asm/processor.h>
 35#include <asm/vtimer.h>
 36#include <asm/exec.h>
 37#include <asm/irq.h>
 38#include <asm/nmi.h>
 39#include <asm/smp.h>
 40#include <asm/stacktrace.h>
 41#include <asm/switch_to.h>
 42#include <asm/runtime_instr.h>
 43#include "entry.h"
 44
 45asmlinkage void ret_from_fork(void) asm ("ret_from_fork");
 46
 47extern void kernel_thread_starter(void);
 48
 49void flush_thread(void)
 50{
 51}
 52
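   /*
    * arch_setup_new_exec() runs after a successful exec. It caches the new
    * pid in the lowcore and, if facility 40 (load-program-parameter) is
    * installed, reloads the LPP value so the hardware program parameter
    * follows the current pid.
    */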
 53void arch_setup_new_exec(void)
 54{
 55	if (S390_lowcore.current_pid != current->pid) {
 56		S390_lowcore.current_pid = current->pid;
 57		if (test_facility(40))
 58			lpp(&S390_lowcore.lpp);
 59	}
 60}
 61
 62void arch_release_task_struct(struct task_struct *tsk)
 63{
 64	runtime_instr_release(tsk);
 65	guarded_storage_release(tsk);
 66}
 67
 68int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 69{
 70	/*
 71	 * Save the floating-point or vector register state of the current
 72	 * task and set the CIF_FPU flag to lazily restore the FPU register
 73	 * state when returning to user space.
 74	 */
 75	save_fpu_regs();
 76
 77	memcpy(dst, src, arch_task_struct_size);
 78	dst->thread.fpu.regs = dst->thread.fpu.fprs;
 79	return 0;
 80}
 81
 82int copy_thread_tls(unsigned long clone_flags, unsigned long new_stackp,
 83		    unsigned long arg, struct task_struct *p, unsigned long tls)
 84{
 85	struct fake_frame
 86	{
 87		struct stack_frame sf;
 88		struct pt_regs childregs;
 89	} *frame;
 90
 91	frame = container_of(task_pt_regs(p), struct fake_frame, childregs);
 92	p->thread.ksp = (unsigned long) frame;
 93	/* Save access registers to new thread structure. */
 94	save_access_regs(&p->thread.acrs[0]);
 95	/* start new process with ar4 pointing to the correct address space */
 96	p->thread.mm_segment = get_fs();
 97	/* Don't copy debug registers */
 98	memset(&p->thread.per_user, 0, sizeof(p->thread.per_user));
 99	memset(&p->thread.per_event, 0, sizeof(p->thread.per_event));
100	clear_tsk_thread_flag(p, TIF_SINGLE_STEP);
101	p->thread.per_flags = 0;
102	/* Initialize per thread user and system timer values */
103	p->thread.user_timer = 0;
104	p->thread.guest_timer = 0;
105	p->thread.system_timer = 0;
106	p->thread.hardirq_timer = 0;
107	p->thread.softirq_timer = 0;
108
109	frame->sf.back_chain = 0;
110	/* new return point is ret_from_fork */
111	frame->sf.gprs[8] = (unsigned long) ret_from_fork;
112	/* fake return stack for resume(), don't go back to schedule */
113	frame->sf.gprs[9] = (unsigned long) frame;
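   	/*
   	 * In struct stack_frame, gprs[] holds the callee-saved registers
   	 * %r6-%r15, so gprs[8] and gprs[9] above are the saved %r14 (return
   	 * address) and %r15 (stack pointer). When the scheduler switches to
   	 * the child for the first time, the context-switch code restores
   	 * these slots and "returns" into ret_from_fork on this fabricated
   	 * frame instead of unwinding back into schedule().
   	 */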
114
115	/* Store access registers to kernel stack of new process. */
116	if (unlikely(p->flags & PF_KTHREAD)) {
117		/* kernel thread */
118		memset(&frame->childregs, 0, sizeof(struct pt_regs));
119		frame->childregs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT |
120				PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
121		frame->childregs.psw.addr =
122				(unsigned long) kernel_thread_starter;
123		frame->childregs.gprs[9] = new_stackp; /* function */
124		frame->childregs.gprs[10] = arg;
125		frame->childregs.gprs[11] = (unsigned long) do_exit;
126		frame->childregs.orig_gpr2 = -1;
127
128		return 0;
129	}
130	frame->childregs = *current_pt_regs();
131	frame->childregs.gprs[2] = 0;	/* child returns 0 on fork. */
132	frame->childregs.flags = 0;
133	if (new_stackp)
134		frame->childregs.gprs[15] = new_stackp;
135
136	/* Don't copy runtime instrumentation info */
137	p->thread.ri_cb = NULL;
138	frame->childregs.psw.mask &= ~PSW_MASK_RI;
139	/* Don't copy guarded storage control block */
140	p->thread.gs_cb = NULL;
141	p->thread.gs_bc_cb = NULL;
142
143	/* Set a new TLS? */
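   	/*
   	 * On s390 the TLS pointer is kept in access registers: 31-bit
   	 * (compat) tasks use %a0 only, while 64-bit tasks split the value
   	 * across %a0 (upper 32 bits) and %a1 (lower 32 bits), matching
   	 * acrs[0]/acrs[1] below.
   	 */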
144	if (clone_flags & CLONE_SETTLS) {
145		if (is_compat_task()) {
146			p->thread.acrs[0] = (unsigned int)tls;
147		} else {
148			p->thread.acrs[0] = (unsigned int)(tls >> 32);
149			p->thread.acrs[1] = (unsigned int)tls;
150		}
151	}
152	return 0;
153}
154
155asmlinkage void execve_tail(void)
156{
157	current->thread.fpu.fpc = 0;
158	asm volatile("sfpc %0" : : "d" (0));
159}
160
161/*
162 * Fill in the FPU structure for a core dump.
163 */
164int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs)
165{
166	save_fpu_regs();
167	fpregs->fpc = current->thread.fpu.fpc;
168	fpregs->pad = 0;
169	if (MACHINE_HAS_VX)
170		convert_vx_to_fp((freg_t *)&fpregs->fprs,
171				 current->thread.fpu.vxrs);
172	else
173		memcpy(&fpregs->fprs, current->thread.fpu.fprs,
174		       sizeof(fpregs->fprs));
175	return 1;
176}
177EXPORT_SYMBOL(dump_fpu);
178
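   /*
    * get_wchan() walks the sleeping task's kernel stack via the back chain
    * and returns the first saved return address (gprs[8], i.e. %r14) that
    * is not part of scheduler code; this is what is reported as the task's
    * "wait channel". The walk is bounded to 16 frames and gives up if a
    * frame pointer leaves the task's stack.
    */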
179unsigned long get_wchan(struct task_struct *p)
180{
181	struct stack_frame *sf, *low, *high;
182	unsigned long return_address;
183	int count;
184
185	if (!p || p == current || p->state == TASK_RUNNING || !task_stack_page(p))
186		return 0;
187
188	if (!try_get_task_stack(p))
189		return 0;
190
191	low = task_stack_page(p);
192	high = (struct stack_frame *) task_pt_regs(p);
193	sf = (struct stack_frame *) p->thread.ksp;
194	if (sf <= low || sf > high) {
195		return_address = 0;
196		goto out;
197	}
198	for (count = 0; count < 16; count++) {
199		sf = (struct stack_frame *)READ_ONCE_NOCHECK(sf->back_chain);
200		if (sf <= low || sf > high) {
201			return_address = 0;
202			goto out;
203		}
204		return_address = READ_ONCE_NOCHECK(sf->gprs[8]);
205		if (!in_sched_functions(return_address))
206			goto out;
207	}
208out:
209	put_task_stack(p);
210	return return_address;
211}
212
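   /*
    * Stack and brk randomization helpers: arch_align_stack() subtracts a
    * random sub-page offset (unless address space randomization is
    * disabled) and keeps the stack 16-byte aligned; arch_randomize_brk()
    * page-aligns the heap start plus a random offset bounded by
    * BRK_RND_MASK pages.
    */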
213unsigned long arch_align_stack(unsigned long sp)
214{
215	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
216		sp -= get_random_int() & ~PAGE_MASK;
217	return sp & ~0xf;
218}
219
220static inline unsigned long brk_rnd(void)
221{
222	return (get_random_int() & BRK_RND_MASK) << PAGE_SHIFT;
223}
224
225unsigned long arch_randomize_brk(struct mm_struct *mm)
226{
227	unsigned long ret;
228
229	ret = PAGE_ALIGN(mm->brk + brk_rnd());
230	return (ret > mm->brk) ? ret : mm->brk;
231}
232
233void set_fs_fixup(void)
234{
235	struct pt_regs *regs = current_pt_regs();
236	static bool warned;
237
238	set_fs(USER_DS);
239	if (warned)
240		return;
241	WARN(1, "Unbalanced set_fs - int code: 0x%x\n", regs->int_code);
242	show_registers(regs);
243	warned = true;
244}
v4.6
  1/*
  2 * This file handles the architecture dependent parts of process handling.
  3 *
  4 *    Copyright IBM Corp. 1999, 2009
  5 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
  6 *		 Hartmut Penner <hp@de.ibm.com>,
  7 *		 Denis Joseph Barrow,
  8 */
  9
 10#include <linux/compiler.h>
 11#include <linux/cpu.h>
 12#include <linux/sched.h>
 13#include <linux/kernel.h>
 14#include <linux/mm.h>
 15#include <linux/elfcore.h>
 16#include <linux/smp.h>
 17#include <linux/slab.h>
 18#include <linux/interrupt.h>
 19#include <linux/tick.h>
 20#include <linux/personality.h>
 21#include <linux/syscalls.h>
 22#include <linux/compat.h>
 23#include <linux/kprobes.h>
 24#include <linux/random.h>
 25#include <linux/module.h>
 26#include <linux/init_task.h>
 27#include <asm/io.h>
 28#include <asm/processor.h>
 29#include <asm/vtimer.h>
 30#include <asm/exec.h>
 31#include <asm/irq.h>
 32#include <asm/nmi.h>
 33#include <asm/smp.h>
 34#include <asm/switch_to.h>
 35#include <asm/runtime_instr.h>
 36#include "entry.h"
 37
 38asmlinkage void ret_from_fork(void) asm ("ret_from_fork");
 39
 40/* FPU save area for the init task */
 41__vector128 init_task_fpu_regs[__NUM_VXRS] __init_task_data;
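   /*
    * In this version the per-task FPU/vector save area is a separately
    * allocated buffer (see arch_dup_task_struct() below); the init task is
    * not created through arch_dup_task_struct(), so its save area is
    * provided statically here.
    */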
 42
 43/*
 44 * Return the saved PC of a blocked thread. Used in kernel/sched.
 45 * resume() in entry.S does not create a new stack frame; it
 46 * just stores the registers %r6-%r15 to the frame given by
 47 * schedule(). We want to return the address of the caller of
 48 * schedule(), so we have to walk the backchain one time to
 49 * find the frame in which schedule() stored its return address.
 50 */
 51unsigned long thread_saved_pc(struct task_struct *tsk)
 52{
 53	struct stack_frame *sf, *low, *high;
 54
 55	if (!tsk || !task_stack_page(tsk))
 56		return 0;
 57	low = task_stack_page(tsk);
 58	high = (struct stack_frame *) task_pt_regs(tsk);
 59	sf = (struct stack_frame *) tsk->thread.ksp;
 60	if (sf <= low || sf > high)
 61		return 0;
 62	sf = (struct stack_frame *) sf->back_chain;
 63	if (sf <= low || sf > high)
 64		return 0;
 65	return sf->gprs[8];
 66}
 67
 68extern void kernel_thread_starter(void);
 69
 70/*
 71 * Free the current thread's data structures etc.
 72 */
 73void exit_thread(void)
 74{
 75	exit_thread_runtime_instr();
 76}
 77
 78void flush_thread(void)
 79{
 80}
 81
 82void release_thread(struct task_struct *dead_task)
 83{
 84}
 85
 86void arch_release_task_struct(struct task_struct *tsk)
 87{
 88	/* Free either the floating-point or the vector register save area */
 89	kfree(tsk->thread.fpu.regs);
 90}
 91
 92int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 93{
 94	size_t fpu_regs_size;
 95
 96	*dst = *src;
 97
 98	/*
 99	 * If the vector extension is available, it is enabled for all tasks,
100	 * and, thus, the FPU register save area must be allocated accordingly.
101	 */
102	fpu_regs_size = MACHINE_HAS_VX ? sizeof(__vector128) * __NUM_VXRS
103				       : sizeof(freg_t) * __NUM_FPRS;
104	dst->thread.fpu.regs = kzalloc(fpu_regs_size, GFP_KERNEL|__GFP_REPEAT);
105	if (!dst->thread.fpu.regs)
106		return -ENOMEM;
107
108	/*
109	 * Save the floating-point or vector register state of the current
110	 * task and set the CIF_FPU flag to lazily restore the FPU register
111	 * state when returning to user space.
112	 */
113	save_fpu_regs();
114	dst->thread.fpu.fpc = current->thread.fpu.fpc;
115	memcpy(dst->thread.fpu.regs, current->thread.fpu.regs, fpu_regs_size);
116
117	return 0;
118}
119
120int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
121		unsigned long arg, struct task_struct *p)
122{
123	struct thread_info *ti;
124	struct fake_frame
125	{
126		struct stack_frame sf;
127		struct pt_regs childregs;
128	} *frame;
129
130	frame = container_of(task_pt_regs(p), struct fake_frame, childregs);
131	p->thread.ksp = (unsigned long) frame;
132	/* Save access registers to new thread structure. */
133	save_access_regs(&p->thread.acrs[0]);
134	/* start new process with ar4 pointing to the correct address space */
135	p->thread.mm_segment = get_fs();
136	/* Don't copy debug registers */
137	memset(&p->thread.per_user, 0, sizeof(p->thread.per_user));
138	memset(&p->thread.per_event, 0, sizeof(p->thread.per_event));
139	clear_tsk_thread_flag(p, TIF_SINGLE_STEP);
140	/* Initialize per thread user and system timer values */
141	ti = task_thread_info(p);
142	ti->user_timer = 0;
143	ti->system_timer = 0;
144
145	frame->sf.back_chain = 0;
146	/* new return point is ret_from_fork */
147	frame->sf.gprs[8] = (unsigned long) ret_from_fork;
148	/* fake return stack for resume(), don't go back to schedule */
149	frame->sf.gprs[9] = (unsigned long) frame;
150
151	/* Store access registers to kernel stack of new process. */
152	if (unlikely(p->flags & PF_KTHREAD)) {
153		/* kernel thread */
154		memset(&frame->childregs, 0, sizeof(struct pt_regs));
155		frame->childregs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT |
156				PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
157		frame->childregs.psw.addr =
158				(unsigned long) kernel_thread_starter;
159		frame->childregs.gprs[9] = new_stackp; /* function */
160		frame->childregs.gprs[10] = arg;
161		frame->childregs.gprs[11] = (unsigned long) do_exit;
162		frame->childregs.orig_gpr2 = -1;
163
164		return 0;
165	}
166	frame->childregs = *current_pt_regs();
167	frame->childregs.gprs[2] = 0;	/* child returns 0 on fork. */
168	frame->childregs.flags = 0;
169	if (new_stackp)
170		frame->childregs.gprs[15] = new_stackp;
171
172	/* Don't copy runtime instrumentation info */
173	p->thread.ri_cb = NULL;
174	frame->childregs.psw.mask &= ~PSW_MASK_RI;
175
176	/* Set a new TLS ?  */
177	if (clone_flags & CLONE_SETTLS) {
178		unsigned long tls = frame->childregs.gprs[6];
179		if (is_compat_task()) {
180			p->thread.acrs[0] = (unsigned int)tls;
181		} else {
182			p->thread.acrs[0] = (unsigned int)(tls >> 32);
183			p->thread.acrs[1] = (unsigned int)tls;
184		}
185	}
186	return 0;
187}
188
189asmlinkage void execve_tail(void)
190{
191	current->thread.fpu.fpc = 0;
192	asm volatile("sfpc %0" : : "d" (0));
193}
194
195/*
196 * Fill in the FPU structure for a core dump.
197 */
198int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs)
199{
200	save_fpu_regs();
201	fpregs->fpc = current->thread.fpu.fpc;
202	fpregs->pad = 0;
203	if (MACHINE_HAS_VX)
204		convert_vx_to_fp((freg_t *)&fpregs->fprs,
205				 current->thread.fpu.vxrs);
206	else
207		memcpy(&fpregs->fprs, current->thread.fpu.fprs,
208		       sizeof(fpregs->fprs));
209	return 1;
210}
211EXPORT_SYMBOL(dump_fpu);
212
213unsigned long get_wchan(struct task_struct *p)
214{
215	struct stack_frame *sf, *low, *high;
216	unsigned long return_address;
217	int count;
218
219	if (!p || p == current || p->state == TASK_RUNNING || !task_stack_page(p))
220		return 0;
221	low = task_stack_page(p);
222	high = (struct stack_frame *) task_pt_regs(p);
223	sf = (struct stack_frame *) p->thread.ksp;
224	if (sf <= low || sf > high)
225		return 0;
226	for (count = 0; count < 16; count++) {
227		sf = (struct stack_frame *) sf->back_chain;
228		if (sf <= low || sf > high)
229			return 0;
230		return_address = sf->gprs[8];
231		if (!in_sched_functions(return_address))
232			return return_address;
233	}
234	return 0;
235}
236
237unsigned long arch_align_stack(unsigned long sp)
238{
239	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
240		sp -= get_random_int() & ~PAGE_MASK;
241	return sp & ~0xf;
242}
243
244static inline unsigned long brk_rnd(void)
245{
246	return (get_random_int() & BRK_RND_MASK) << PAGE_SHIFT;
247}
248
249unsigned long arch_randomize_brk(struct mm_struct *mm)
250{
251	unsigned long ret;
252
253	ret = PAGE_ALIGN(mm->brk + brk_rnd());
254	return (ret > mm->brk) ? ret : mm->brk;
255}