v4.17 (arch/s390/kernel/process.c)
// SPDX-License-Identifier: GPL-2.0
/*
 * This file handles the architecture dependent parts of process handling.
 *
 *    Copyright IBM Corp. 1999, 2009
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *		 Hartmut Penner <hp@de.ibm.com>,
 *		 Denis Joseph Barrow,
 */

#include <linux/elf-randomize.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/tick.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/kprobes.h>
#include <linux/random.h>
#include <linux/export.h>
#include <linux/init_task.h>
#include <asm/cpu_mf.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/vtimer.h>
#include <asm/exec.h>
#include <asm/irq.h>
#include <asm/nmi.h>
#include <asm/smp.h>
#include <asm/switch_to.h>
#include <asm/runtime_instr.h>
#include "entry.h"

asmlinkage void ret_from_fork(void) asm ("ret_from_fork");

extern void kernel_thread_starter(void);

void flush_thread(void)
{
}

void arch_setup_new_exec(void)
{
	if (S390_lowcore.current_pid != current->pid) {
		S390_lowcore.current_pid = current->pid;
		if (test_facility(40))
			lpp(&S390_lowcore.lpp);
	}
}

void arch_release_task_struct(struct task_struct *tsk)
{
	runtime_instr_release(tsk);
	guarded_storage_release(tsk);
}

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	/*
	 * Save the floating-point or vector register state of the current
	 * task and set the CIF_FPU flag to lazy restore the FPU register
	 * state when returning to user space.
	 */
	save_fpu_regs();

	memcpy(dst, src, arch_task_struct_size);
	dst->thread.fpu.regs = dst->thread.fpu.fprs;
	return 0;
}

int copy_thread_tls(unsigned long clone_flags, unsigned long new_stackp,
		    unsigned long arg, struct task_struct *p, unsigned long tls)
{
	struct fake_frame
	{
		struct stack_frame sf;
		struct pt_regs childregs;
	} *frame;

	frame = container_of(task_pt_regs(p), struct fake_frame, childregs);
	p->thread.ksp = (unsigned long) frame;
	/* Save access registers to new thread structure. */
	save_access_regs(&p->thread.acrs[0]);
	/* start new process with ar4 pointing to the correct address space */
	p->thread.mm_segment = get_fs();
	/* Don't copy debug registers */
	memset(&p->thread.per_user, 0, sizeof(p->thread.per_user));
	memset(&p->thread.per_event, 0, sizeof(p->thread.per_event));
	clear_tsk_thread_flag(p, TIF_SINGLE_STEP);
	p->thread.per_flags = 0;
	/* Initialize per thread user and system timer values */
	p->thread.user_timer = 0;
	p->thread.guest_timer = 0;
	p->thread.system_timer = 0;
	p->thread.hardirq_timer = 0;
	p->thread.softirq_timer = 0;

	frame->sf.back_chain = 0;
	/* new return point is ret_from_fork */
	frame->sf.gprs[8] = (unsigned long) ret_from_fork;
	/* fake return stack for resume(), don't go back to schedule */
	frame->sf.gprs[9] = (unsigned long) frame;

	/* Store access registers to kernel stack of new process. */
	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		memset(&frame->childregs, 0, sizeof(struct pt_regs));
		frame->childregs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT |
				PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
		frame->childregs.psw.addr =
				(unsigned long) kernel_thread_starter;
		frame->childregs.gprs[9] = new_stackp; /* function */
		frame->childregs.gprs[10] = arg;
		frame->childregs.gprs[11] = (unsigned long) do_exit;
		frame->childregs.orig_gpr2 = -1;

		return 0;
	}
	frame->childregs = *current_pt_regs();
	frame->childregs.gprs[2] = 0;	/* child returns 0 on fork. */
	frame->childregs.flags = 0;
	if (new_stackp)
		frame->childregs.gprs[15] = new_stackp;

	/* Don't copy runtime instrumentation info */
	p->thread.ri_cb = NULL;
	frame->childregs.psw.mask &= ~PSW_MASK_RI;
	/* Don't copy guarded storage control block */
	p->thread.gs_cb = NULL;
	p->thread.gs_bc_cb = NULL;

	/* Set a new TLS ?  */
	if (clone_flags & CLONE_SETTLS) {
		if (is_compat_task()) {
			p->thread.acrs[0] = (unsigned int)tls;
		} else {
			p->thread.acrs[0] = (unsigned int)(tls >> 32);
			p->thread.acrs[1] = (unsigned int)tls;
		}
	}
	return 0;
}

asmlinkage void execve_tail(void)
{
	current->thread.fpu.fpc = 0;
	asm volatile("sfpc %0" : : "d" (0));
}

/*
 * fill in the FPU structure for a core dump.
 */
int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs)
{
	save_fpu_regs();
	fpregs->fpc = current->thread.fpu.fpc;
	fpregs->pad = 0;
	if (MACHINE_HAS_VX)
		convert_vx_to_fp((freg_t *)&fpregs->fprs,
				 current->thread.fpu.vxrs);
	else
		memcpy(&fpregs->fprs, current->thread.fpu.fprs,
		       sizeof(fpregs->fprs));
	return 1;
}
EXPORT_SYMBOL(dump_fpu);

unsigned long get_wchan(struct task_struct *p)
{
	struct stack_frame *sf, *low, *high;
	unsigned long return_address;
	int count;

	if (!p || p == current || p->state == TASK_RUNNING || !task_stack_page(p))
		return 0;
	low = task_stack_page(p);
	high = (struct stack_frame *) task_pt_regs(p);
	sf = (struct stack_frame *) p->thread.ksp;
	if (sf <= low || sf > high)
		return 0;
	for (count = 0; count < 16; count++) {
		sf = (struct stack_frame *) sf->back_chain;
		if (sf <= low || sf > high)
			return 0;
		return_address = sf->gprs[8];
		if (!in_sched_functions(return_address))
			return return_address;
	}
	return 0;
}

unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;
	return sp & ~0xf;
}

static inline unsigned long brk_rnd(void)
{
	return (get_random_int() & BRK_RND_MASK) << PAGE_SHIFT;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long ret;

	ret = PAGE_ALIGN(mm->brk + brk_rnd());
	return (ret > mm->brk) ? ret : mm->brk;
}

void set_fs_fixup(void)
{
	struct pt_regs *regs = current_pt_regs();
	static bool warned;

	set_fs(USER_DS);
	if (warned)
		return;
	WARN(1, "Unbalanced set_fs - int code: 0x%x\n", regs->int_code);
	show_registers(regs);
	warned = true;
}
v4.10.11 (arch/s390/kernel/process.c)
 
/*
 * This file handles the architecture dependent parts of process handling.
 *
 *    Copyright IBM Corp. 1999, 2009
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>,
 *		 Hartmut Penner <hp@de.ibm.com>,
 *		 Denis Joseph Barrow,
 */

#include <linux/elf-randomize.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/tick.h>
#include <linux/personality.h>
#include <linux/syscalls.h>
#include <linux/compat.h>
#include <linux/kprobes.h>
#include <linux/random.h>
#include <linux/module.h>
#include <linux/init_task.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/vtimer.h>
#include <asm/exec.h>
#include <asm/irq.h>
#include <asm/nmi.h>
#include <asm/smp.h>
#include <asm/switch_to.h>
#include <asm/runtime_instr.h>
#include "entry.h"

asmlinkage void ret_from_fork(void) asm ("ret_from_fork");

/*
 * Return saved PC of a blocked thread. Used in kernel/sched.
 * resume in entry.S does not create a new stack frame, it
 * just stores the registers %r6-%r15 to the frame given by
 * schedule. We want to return the address of the caller of
 * schedule, so we have to walk the backchain one time to
 * find the frame where schedule() stored its return address.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	struct stack_frame *sf, *low, *high;

	if (!tsk || !task_stack_page(tsk))
		return 0;
	low = task_stack_page(tsk);
	high = (struct stack_frame *) task_pt_regs(tsk);
	sf = (struct stack_frame *) tsk->thread.ksp;
	if (sf <= low || sf > high)
		return 0;
	sf = (struct stack_frame *) sf->back_chain;
	if (sf <= low || sf > high)
		return 0;
	return sf->gprs[8];
}

extern void kernel_thread_starter(void);

/*
 * Free current thread data structures etc..
 */
void exit_thread(struct task_struct *tsk)
{
	if (tsk == current)
		exit_thread_runtime_instr();
}

void flush_thread(void)
{
}

void release_thread(struct task_struct *dead_task)
{
}

void arch_release_task_struct(struct task_struct *tsk)
{
}

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	/*
	 * Save the floating-point or vector register state of the current
	 * task and set the CIF_FPU flag to lazy restore the FPU register
	 * state when returning to user space.
	 */
	save_fpu_regs();

	memcpy(dst, src, arch_task_struct_size);
	dst->thread.fpu.regs = dst->thread.fpu.fprs;
	return 0;
}

int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
		unsigned long arg, struct task_struct *p)
{
	struct fake_frame
	{
		struct stack_frame sf;
		struct pt_regs childregs;
	} *frame;

	frame = container_of(task_pt_regs(p), struct fake_frame, childregs);
	p->thread.ksp = (unsigned long) frame;
	/* Save access registers to new thread structure. */
	save_access_regs(&p->thread.acrs[0]);
	/* start new process with ar4 pointing to the correct address space */
	p->thread.mm_segment = get_fs();
	/* Don't copy debug registers */
	memset(&p->thread.per_user, 0, sizeof(p->thread.per_user));
	memset(&p->thread.per_event, 0, sizeof(p->thread.per_event));
	clear_tsk_thread_flag(p, TIF_SINGLE_STEP);
	/* Initialize per thread user and system timer values */
	p->thread.user_timer = 0;
	p->thread.system_timer = 0;

	frame->sf.back_chain = 0;
	/* new return point is ret_from_fork */
	frame->sf.gprs[8] = (unsigned long) ret_from_fork;
	/* fake return stack for resume(), don't go back to schedule */
	frame->sf.gprs[9] = (unsigned long) frame;

	/* Store access registers to kernel stack of new process. */
	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		memset(&frame->childregs, 0, sizeof(struct pt_regs));
		frame->childregs.psw.mask = PSW_KERNEL_BITS | PSW_MASK_DAT |
				PSW_MASK_IO | PSW_MASK_EXT | PSW_MASK_MCHECK;
		frame->childregs.psw.addr =
				(unsigned long) kernel_thread_starter;
		frame->childregs.gprs[9] = new_stackp; /* function */
		frame->childregs.gprs[10] = arg;
		frame->childregs.gprs[11] = (unsigned long) do_exit;
		frame->childregs.orig_gpr2 = -1;

		return 0;
	}
	frame->childregs = *current_pt_regs();
	frame->childregs.gprs[2] = 0;	/* child returns 0 on fork. */
	frame->childregs.flags = 0;
	if (new_stackp)
		frame->childregs.gprs[15] = new_stackp;

	/* Don't copy runtime instrumentation info */
	p->thread.ri_cb = NULL;
	frame->childregs.psw.mask &= ~PSW_MASK_RI;

	/* Set a new TLS ?  */
	if (clone_flags & CLONE_SETTLS) {
		unsigned long tls = frame->childregs.gprs[6];
		if (is_compat_task()) {
			p->thread.acrs[0] = (unsigned int)tls;
		} else {
			p->thread.acrs[0] = (unsigned int)(tls >> 32);
			p->thread.acrs[1] = (unsigned int)tls;
		}
	}
	return 0;
}

asmlinkage void execve_tail(void)
{
	current->thread.fpu.fpc = 0;
	asm volatile("sfpc %0" : : "d" (0));
}

/*
 * fill in the FPU structure for a core dump.
 */
int dump_fpu (struct pt_regs * regs, s390_fp_regs *fpregs)
{
	save_fpu_regs();
	fpregs->fpc = current->thread.fpu.fpc;
	fpregs->pad = 0;
	if (MACHINE_HAS_VX)
		convert_vx_to_fp((freg_t *)&fpregs->fprs,
				 current->thread.fpu.vxrs);
	else
		memcpy(&fpregs->fprs, current->thread.fpu.fprs,
		       sizeof(fpregs->fprs));
	return 1;
}
EXPORT_SYMBOL(dump_fpu);

unsigned long get_wchan(struct task_struct *p)
{
	struct stack_frame *sf, *low, *high;
	unsigned long return_address;
	int count;

	if (!p || p == current || p->state == TASK_RUNNING || !task_stack_page(p))
		return 0;
	low = task_stack_page(p);
	high = (struct stack_frame *) task_pt_regs(p);
	sf = (struct stack_frame *) p->thread.ksp;
	if (sf <= low || sf > high)
		return 0;
	for (count = 0; count < 16; count++) {
		sf = (struct stack_frame *) sf->back_chain;
		if (sf <= low || sf > high)
			return 0;
		return_address = sf->gprs[8];
		if (!in_sched_functions(return_address))
			return return_address;
	}
	return 0;
}

unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;
	return sp & ~0xf;
}

static inline unsigned long brk_rnd(void)
{
	return (get_random_int() & BRK_RND_MASK) << PAGE_SHIFT;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long ret;

	ret = PAGE_ALIGN(mm->brk + brk_rnd());
	return (ret > mm->brk) ? ret : mm->brk;
}