v4.6
  1/*
  2 *  linux/arch/arm/kernel/process.c
  3 *
  4 *  Copyright (C) 1996-2000 Russell King - Converted to ARM.
  5 *  Original Copyright (C) 1995  Linus Torvalds
  6 *
  7 * This program is free software; you can redistribute it and/or modify
  8 * it under the terms of the GNU General Public License version 2 as
  9 * published by the Free Software Foundation.
 10 */
 11#include <stdarg.h>
 12
 13#include <linux/export.h>
 14#include <linux/sched.h>
 15#include <linux/kernel.h>
 16#include <linux/mm.h>
 17#include <linux/stddef.h>
 18#include <linux/unistd.h>
 19#include <linux/user.h>
 20#include <linux/interrupt.h>
 21#include <linux/kallsyms.h>
 22#include <linux/init.h>
 23#include <linux/elfcore.h>
 24#include <linux/pm.h>
 25#include <linux/tick.h>
 26#include <linux/utsname.h>
 27#include <linux/uaccess.h>
 28#include <linux/random.h>
 29#include <linux/hw_breakpoint.h>
 30#include <linux/leds.h>
 31
 32#include <asm/processor.h>
 33#include <asm/thread_notify.h>
 34#include <asm/stacktrace.h>
 35#include <asm/system_misc.h>
 36#include <asm/mach/time.h>
 37#include <asm/tls.h>
 38#include <asm/vdso.h>
 39
 40#ifdef CONFIG_CC_STACKPROTECTOR
 41#include <linux/stackprotector.h>
 42unsigned long __stack_chk_guard __read_mostly;
 43EXPORT_SYMBOL(__stack_chk_guard);
 44#endif
 45
 46static const char *processor_modes[] __maybe_unused = {
 47  "USER_26", "FIQ_26" , "IRQ_26" , "SVC_26" , "UK4_26" , "UK5_26" , "UK6_26" , "UK7_26" ,
 48  "UK8_26" , "UK9_26" , "UK10_26", "UK11_26", "UK12_26", "UK13_26", "UK14_26", "UK15_26",
 49  "USER_32", "FIQ_32" , "IRQ_32" , "SVC_32" , "UK4_32" , "UK5_32" , "MON_32" , "ABT_32" ,
 50  "UK8_32" , "UK9_32" , "HYP_32", "UND_32" , "UK12_32", "UK13_32", "UK14_32", "SYS_32"
 51};
 52
 53static const char *isa_modes[] __maybe_unused = {
 54  "ARM" , "Thumb" , "Jazelle", "ThumbEE"
 55};
 56
 57/*
 58 * This is our default idle handler.
 59 */
 60
 61void (*arm_pm_idle)(void);
 62
 63/*
 64 * Called from the core idle loop.
 65 */
 66
 67void arch_cpu_idle(void)
 68{
 69	if (arm_pm_idle)
 70		arm_pm_idle();
 71	else
 72		cpu_do_idle();
 73	local_irq_enable();
 74}
 75
 76void arch_cpu_idle_prepare(void)
 77{
 78	local_fiq_enable();
 79}
 80
 81void arch_cpu_idle_enter(void)
 82{
 83	ledtrig_cpu(CPU_LED_IDLE_START);
 84#ifdef CONFIG_PL310_ERRATA_769419
 85	wmb();
 86#endif
 87}
 88
 89void arch_cpu_idle_exit(void)
 90{
 91	ledtrig_cpu(CPU_LED_IDLE_END);
 92}
 93
 94void __show_regs(struct pt_regs *regs)
 95{
 96	unsigned long flags;
 97	char buf[64];
 98#ifndef CONFIG_CPU_V7M
 99	unsigned int domain;
100#ifdef CONFIG_CPU_SW_DOMAIN_PAN
101	/*
102	 * Get the domain register for the parent context. In user
103	 * mode, we don't save the DACR, so let's use what it should
104	 * be. For other modes, we place it after the pt_regs struct.
105	 */
106	if (user_mode(regs))
107		domain = DACR_UACCESS_ENABLE;
108	else
109		domain = *(unsigned int *)(regs + 1);
110#else
111	domain = get_domain();
112#endif
113#endif
114
115	show_regs_print_info(KERN_DEFAULT);
116
117	print_symbol("PC is at %s\n", instruction_pointer(regs));
118	print_symbol("LR is at %s\n", regs->ARM_lr);
119	printk("pc : [<%08lx>]    lr : [<%08lx>]    psr: %08lx\n"
120	       "sp : %08lx  ip : %08lx  fp : %08lx\n",
121		regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
122		regs->ARM_sp, regs->ARM_ip, regs->ARM_fp);
123	printk("r10: %08lx  r9 : %08lx  r8 : %08lx\n",
124		regs->ARM_r10, regs->ARM_r9,
125		regs->ARM_r8);
126	printk("r7 : %08lx  r6 : %08lx  r5 : %08lx  r4 : %08lx\n",
127		regs->ARM_r7, regs->ARM_r6,
128		regs->ARM_r5, regs->ARM_r4);
129	printk("r3 : %08lx  r2 : %08lx  r1 : %08lx  r0 : %08lx\n",
130		regs->ARM_r3, regs->ARM_r2,
131		regs->ARM_r1, regs->ARM_r0);
132
133	flags = regs->ARM_cpsr;
134	buf[0] = flags & PSR_N_BIT ? 'N' : 'n';
135	buf[1] = flags & PSR_Z_BIT ? 'Z' : 'z';
136	buf[2] = flags & PSR_C_BIT ? 'C' : 'c';
137	buf[3] = flags & PSR_V_BIT ? 'V' : 'v';
138	buf[4] = '\0';
139
140#ifndef CONFIG_CPU_V7M
141	{
142		const char *segment;
143
144		if ((domain & domain_mask(DOMAIN_USER)) ==
145		    domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
146			segment = "none";
147		else if (get_fs() == get_ds())
148			segment = "kernel";
149		else
150			segment = "user";
151
152		printk("Flags: %s  IRQs o%s  FIQs o%s  Mode %s  ISA %s  Segment %s\n",
153			buf, interrupts_enabled(regs) ? "n" : "ff",
154			fast_interrupts_enabled(regs) ? "n" : "ff",
155			processor_modes[processor_mode(regs)],
156			isa_modes[isa_mode(regs)], segment);
157	}
158#else
159	printk("xPSR: %08lx\n", regs->ARM_cpsr);
160#endif
161
162#ifdef CONFIG_CPU_CP15
163	{
164		unsigned int ctrl;
165
166		buf[0] = '\0';
167#ifdef CONFIG_CPU_CP15_MMU
168		{
169			unsigned int transbase;
170			asm("mrc p15, 0, %0, c2, c0\n\t"
171			    : "=r" (transbase));
172			snprintf(buf, sizeof(buf), "  Table: %08x  DAC: %08x",
173				transbase, domain);
174		}
175#endif
176		asm("mrc p15, 0, %0, c1, c0\n" : "=r" (ctrl));
177
178		printk("Control: %08x%s\n", ctrl, buf);
179	}
180#endif
181}
182
183void show_regs(struct pt_regs * regs)
184{
185	__show_regs(regs);
186	dump_stack();
187}
188
189ATOMIC_NOTIFIER_HEAD(thread_notify_head);
190
191EXPORT_SYMBOL_GPL(thread_notify_head);
192
193/*
194 * Free current thread data structures etc..
195 */
196void exit_thread(void)
197{
198	thread_notify(THREAD_NOTIFY_EXIT, current_thread_info());
199}
200
201void flush_thread(void)
202{
203	struct thread_info *thread = current_thread_info();
204	struct task_struct *tsk = current;
205
206	flush_ptrace_hw_breakpoint(tsk);
207
208	memset(thread->used_cp, 0, sizeof(thread->used_cp));
209	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
210	memset(&thread->fpstate, 0, sizeof(union fp_state));
211
212	flush_tls();
213
214	thread_notify(THREAD_NOTIFY_FLUSH, thread);
215}
216
217void release_thread(struct task_struct *dead_task)
218{
219}
220
221asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
222
223int
224copy_thread(unsigned long clone_flags, unsigned long stack_start,
225	    unsigned long stk_sz, struct task_struct *p)
226{
227	struct thread_info *thread = task_thread_info(p);
228	struct pt_regs *childregs = task_pt_regs(p);
229
230	memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
231
232#ifdef CONFIG_CPU_USE_DOMAINS
233	/*
234	 * Copy the initial value of the domain access control register
235	 * from the current thread: thread->addr_limit will have been
236	 * copied from the current thread via setup_thread_stack() in
237	 * kernel/fork.c
238	 */
239	thread->cpu_domain = get_domain();
240#endif
241
242	if (likely(!(p->flags & PF_KTHREAD))) {
243		*childregs = *current_pt_regs();
244		childregs->ARM_r0 = 0;
245		if (stack_start)
246			childregs->ARM_sp = stack_start;
247	} else {
248		memset(childregs, 0, sizeof(struct pt_regs));
249		thread->cpu_context.r4 = stk_sz;
250		thread->cpu_context.r5 = stack_start;
251		childregs->ARM_cpsr = SVC_MODE;
252	}
253	thread->cpu_context.pc = (unsigned long)ret_from_fork;
254	thread->cpu_context.sp = (unsigned long)childregs;
255
256	clear_ptrace_hw_breakpoint(p);
257
258	if (clone_flags & CLONE_SETTLS)
259		thread->tp_value[0] = childregs->ARM_r3;
260	thread->tp_value[1] = get_tpuser();
261
262	thread_notify(THREAD_NOTIFY_COPY, thread);
263
264	return 0;
265}
266
267/*
268 * Fill in the task's elfregs structure for a core dump.
269 */
270int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs)
271{
272	elf_core_copy_regs(elfregs, task_pt_regs(t));
273	return 1;
274}
275
276/*
277 * fill in the fpe structure for a core dump...
278 */
279int dump_fpu (struct pt_regs *regs, struct user_fp *fp)
280{
281	struct thread_info *thread = current_thread_info();
282	int used_math = thread->used_cp[1] | thread->used_cp[2];
283
284	if (used_math)
285		memcpy(fp, &thread->fpstate.soft, sizeof (*fp));
286
287	return used_math != 0;
288}
289EXPORT_SYMBOL(dump_fpu);
290
291unsigned long get_wchan(struct task_struct *p)
292{
293	struct stackframe frame;
294	unsigned long stack_page;
295	int count = 0;
296	if (!p || p == current || p->state == TASK_RUNNING)
297		return 0;
298
299	frame.fp = thread_saved_fp(p);
300	frame.sp = thread_saved_sp(p);
301	frame.lr = 0;			/* recovered from the stack */
302	frame.pc = thread_saved_pc(p);
303	stack_page = (unsigned long)task_stack_page(p);
304	do {
305		if (frame.sp < stack_page ||
306		    frame.sp >= stack_page + THREAD_SIZE ||
307		    unwind_frame(&frame) < 0)
308			return 0;
309		if (!in_sched_functions(frame.pc))
310			return frame.pc;
311	} while (count ++ < 16);
312	return 0;
313}
314
315unsigned long arch_randomize_brk(struct mm_struct *mm)
316{
317	unsigned long range_end = mm->brk + 0x02000000;
318	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
319}
320
321#ifdef CONFIG_MMU
322#ifdef CONFIG_KUSER_HELPERS
323/*
324 * The vectors page is always readable from user space for the
325 * atomic helpers. Insert it into the gate_vma so that it is visible
326 * through ptrace and /proc/<pid>/mem.
327 */
328static struct vm_area_struct gate_vma = {
329	.vm_start	= 0xffff0000,
330	.vm_end		= 0xffff0000 + PAGE_SIZE,
331	.vm_flags	= VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC,
332};
333
334static int __init gate_vma_init(void)
335{
336	gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
337	return 0;
338}
339arch_initcall(gate_vma_init);
340
341struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
342{
343	return &gate_vma;
344}
345
346int in_gate_area(struct mm_struct *mm, unsigned long addr)
347{
348	return (addr >= gate_vma.vm_start) && (addr < gate_vma.vm_end);
349}
350
351int in_gate_area_no_mm(unsigned long addr)
352{
353	return in_gate_area(NULL, addr);
354}
355#define is_gate_vma(vma)	((vma) == &gate_vma)
356#else
357#define is_gate_vma(vma)	0
358#endif
359
360const char *arch_vma_name(struct vm_area_struct *vma)
361{
362	return is_gate_vma(vma) ? "[vectors]" : NULL;
363}
364
365/* If possible, provide a placement hint at a random offset from the
366 * stack for the sigpage and vdso pages.
367 */
368static unsigned long sigpage_addr(const struct mm_struct *mm,
369				  unsigned int npages)
370{
371	unsigned long offset;
372	unsigned long first;
373	unsigned long last;
374	unsigned long addr;
375	unsigned int slots;
376
377	first = PAGE_ALIGN(mm->start_stack);
378
379	last = TASK_SIZE - (npages << PAGE_SHIFT);
380
381	/* No room after stack? */
382	if (first > last)
383		return 0;
384
385	/* Just enough room? */
386	if (first == last)
387		return first;
388
389	slots = ((last - first) >> PAGE_SHIFT) + 1;
390
391	offset = get_random_int() % slots;
392
393	addr = first + (offset << PAGE_SHIFT);
394
395	return addr;
396}
397
398static struct page *signal_page;
399extern struct page *get_signal_page(void);
400
401static const struct vm_special_mapping sigpage_mapping = {
402	.name = "[sigpage]",
403	.pages = &signal_page,
404};
405
406int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
407{
408	struct mm_struct *mm = current->mm;
409	struct vm_area_struct *vma;
410	unsigned long npages;
411	unsigned long addr;
412	unsigned long hint;
413	int ret = 0;
414
415	if (!signal_page)
416		signal_page = get_signal_page();
417	if (!signal_page)
418		return -ENOMEM;
419
420	npages = 1; /* for sigpage */
421	npages += vdso_total_pages;
422
423	down_write(&mm->mmap_sem);
424	hint = sigpage_addr(mm, npages);
425	addr = get_unmapped_area(NULL, hint, npages << PAGE_SHIFT, 0, 0);
426	if (IS_ERR_VALUE(addr)) {
427		ret = addr;
428		goto up_fail;
429	}
430
431	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
432		VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
433		&sigpage_mapping);
434
435	if (IS_ERR(vma)) {
436		ret = PTR_ERR(vma);
437		goto up_fail;
438	}
439
440	mm->context.sigpage = addr;
441
442	/* Unlike the sigpage, failure to install the vdso is unlikely
443	 * to be fatal to the process, so no error check needed
444	 * here.
445	 */
446	arm_install_vdso(mm, addr + PAGE_SIZE);
447
448 up_fail:
449	up_write(&mm->mmap_sem);
450	return ret;
451}
452#endif
v6.2
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 *  linux/arch/arm/kernel/process.c
  4 *
  5 *  Copyright (C) 1996-2000 Russell King - Converted to ARM.
  6 *  Original Copyright (C) 1995  Linus Torvalds
  7 */
  8#include <linux/export.h>
  9#include <linux/sched.h>
 10#include <linux/sched/debug.h>
 11#include <linux/sched/task.h>
 12#include <linux/sched/task_stack.h>
 13#include <linux/kernel.h>
 14#include <linux/mm.h>
 15#include <linux/stddef.h>
 16#include <linux/unistd.h>
 17#include <linux/user.h>
 18#include <linux/interrupt.h>
 19#include <linux/init.h>
 20#include <linux/elfcore.h>
 21#include <linux/pm.h>
 22#include <linux/tick.h>
 23#include <linux/utsname.h>
 24#include <linux/uaccess.h>
 25#include <linux/random.h>
 26#include <linux/hw_breakpoint.h>
 27#include <linux/leds.h>
 28
 29#include <asm/processor.h>
 30#include <asm/thread_notify.h>
 31#include <asm/stacktrace.h>
 32#include <asm/system_misc.h>
 33#include <asm/mach/time.h>
 34#include <asm/tls.h>
 35#include <asm/vdso.h>
 36
 37#include "signal.h"
 38
 39#if defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP)
 40DEFINE_PER_CPU(struct task_struct *, __entry_task);
 41#endif
 42
 43#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
 44#include <linux/stackprotector.h>
 45unsigned long __stack_chk_guard __read_mostly;
 46EXPORT_SYMBOL(__stack_chk_guard);
 47#endif
 48
 49#ifndef CONFIG_CURRENT_POINTER_IN_TPIDRURO
 50asmlinkage struct task_struct *__current;
 51EXPORT_SYMBOL(__current);
 52#endif
 53
 54static const char *processor_modes[] __maybe_unused = {
 55  "USER_26", "FIQ_26" , "IRQ_26" , "SVC_26" , "UK4_26" , "UK5_26" , "UK6_26" , "UK7_26" ,
 56  "UK8_26" , "UK9_26" , "UK10_26", "UK11_26", "UK12_26", "UK13_26", "UK14_26", "UK15_26",
 57  "USER_32", "FIQ_32" , "IRQ_32" , "SVC_32" , "UK4_32" , "UK5_32" , "MON_32" , "ABT_32" ,
 58  "UK8_32" , "UK9_32" , "HYP_32", "UND_32" , "UK12_32", "UK13_32", "UK14_32", "SYS_32"
 59};
 60
 61static const char *isa_modes[] __maybe_unused = {
 62  "ARM" , "Thumb" , "Jazelle", "ThumbEE"
 63};
 64
 65/*
 66 * This is our default idle handler.
 67 */
 68
 69void (*arm_pm_idle)(void);
 70
 71/*
 72 * Called from the core idle loop.
 73 */
 74
 75void arch_cpu_idle(void)
 76{
 77	if (arm_pm_idle)
 78		arm_pm_idle();
 79	else
 80		cpu_do_idle();
 81	raw_local_irq_enable();
 82}
 83
 84void arch_cpu_idle_prepare(void)
 85{
 86	local_fiq_enable();
 87}
 88
 89void arch_cpu_idle_enter(void)
 90{
 91	ledtrig_cpu(CPU_LED_IDLE_START);
 92#ifdef CONFIG_PL310_ERRATA_769419
 93	wmb();
 94#endif
 95}
 96
 97void arch_cpu_idle_exit(void)
 98{
 99	ledtrig_cpu(CPU_LED_IDLE_END);
100}
101
102void __show_regs_alloc_free(struct pt_regs *regs)
103{
104	int i;
105
106	/* check for r0 - r12 only */
107	for (i = 0; i < 13; i++) {
108		pr_alert("Register r%d information:", i);
109		mem_dump_obj((void *)regs->uregs[i]);
110	}
111}
112
113void __show_regs(struct pt_regs *regs)
114{
115	unsigned long flags;
116	char buf[64];
117#ifndef CONFIG_CPU_V7M
118	unsigned int domain;
119#ifdef CONFIG_CPU_SW_DOMAIN_PAN
120	/*
121	 * Get the domain register for the parent context. In user
122	 * mode, we don't save the DACR, so let's use what it should
123	 * be. For other modes, we place it after the pt_regs struct.
124	 */
125	if (user_mode(regs)) {
126		domain = DACR_UACCESS_ENABLE;
127	} else {
128		domain = to_svc_pt_regs(regs)->dacr;
129	}
130#else
131	domain = get_domain();
132#endif
133#endif
134
135	show_regs_print_info(KERN_DEFAULT);
136
137	printk("PC is at %pS\n", (void *)instruction_pointer(regs));
138	printk("LR is at %pS\n", (void *)regs->ARM_lr);
139	printk("pc : [<%08lx>]    lr : [<%08lx>]    psr: %08lx\n",
140	       regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr);
141	printk("sp : %08lx  ip : %08lx  fp : %08lx\n",
142	       regs->ARM_sp, regs->ARM_ip, regs->ARM_fp);
143	printk("r10: %08lx  r9 : %08lx  r8 : %08lx\n",
144		regs->ARM_r10, regs->ARM_r9,
145		regs->ARM_r8);
146	printk("r7 : %08lx  r6 : %08lx  r5 : %08lx  r4 : %08lx\n",
147		regs->ARM_r7, regs->ARM_r6,
148		regs->ARM_r5, regs->ARM_r4);
149	printk("r3 : %08lx  r2 : %08lx  r1 : %08lx  r0 : %08lx\n",
150		regs->ARM_r3, regs->ARM_r2,
151		regs->ARM_r1, regs->ARM_r0);
152
153	flags = regs->ARM_cpsr;
154	buf[0] = flags & PSR_N_BIT ? 'N' : 'n';
155	buf[1] = flags & PSR_Z_BIT ? 'Z' : 'z';
156	buf[2] = flags & PSR_C_BIT ? 'C' : 'c';
157	buf[3] = flags & PSR_V_BIT ? 'V' : 'v';
158	buf[4] = '\0';
159
160#ifndef CONFIG_CPU_V7M
161	{
162		const char *segment;
163
164		if ((domain & domain_mask(DOMAIN_USER)) ==
165		    domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
166			segment = "none";
167		else
168			segment = "user";
169
170		printk("Flags: %s  IRQs o%s  FIQs o%s  Mode %s  ISA %s  Segment %s\n",
171			buf, interrupts_enabled(regs) ? "n" : "ff",
172			fast_interrupts_enabled(regs) ? "n" : "ff",
173			processor_modes[processor_mode(regs)],
174			isa_modes[isa_mode(regs)], segment);
175	}
176#else
177	printk("xPSR: %08lx\n", regs->ARM_cpsr);
178#endif
179
180#ifdef CONFIG_CPU_CP15
181	{
182		unsigned int ctrl;
183
184		buf[0] = '\0';
185#ifdef CONFIG_CPU_CP15_MMU
186		{
187			unsigned int transbase;
188			asm("mrc p15, 0, %0, c2, c0\n\t"
189			    : "=r" (transbase));
190			snprintf(buf, sizeof(buf), "  Table: %08x  DAC: %08x",
191				transbase, domain);
192		}
193#endif
194		asm("mrc p15, 0, %0, c1, c0\n" : "=r" (ctrl));
195
196		printk("Control: %08x%s\n", ctrl, buf);
197	}
198#endif
199}
200
201void show_regs(struct pt_regs * regs)
202{
203	__show_regs(regs);
204	dump_backtrace(regs, NULL, KERN_DEFAULT);
205}
206
207ATOMIC_NOTIFIER_HEAD(thread_notify_head);
208
209EXPORT_SYMBOL_GPL(thread_notify_head);
210
211/*
212 * Free current thread data structures etc..
213 */
214void exit_thread(struct task_struct *tsk)
215{
216	thread_notify(THREAD_NOTIFY_EXIT, task_thread_info(tsk));
217}
218
219void flush_thread(void)
220{
221	struct thread_info *thread = current_thread_info();
222	struct task_struct *tsk = current;
223
224	flush_ptrace_hw_breakpoint(tsk);
225
226	memset(thread->used_cp, 0, sizeof(thread->used_cp));
227	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
228	memset(&thread->fpstate, 0, sizeof(union fp_state));
229
230	flush_tls();
231
232	thread_notify(THREAD_NOTIFY_FLUSH, thread);
233}
234
235asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
236
237int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
238{
239	unsigned long clone_flags = args->flags;
240	unsigned long stack_start = args->stack;
241	unsigned long tls = args->tls;
242	struct thread_info *thread = task_thread_info(p);
243	struct pt_regs *childregs = task_pt_regs(p);
244
245	memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));
246
247#ifdef CONFIG_CPU_USE_DOMAINS
248	/*
249	 * Copy the initial value of the domain access control register
250	 * from the current thread: thread->addr_limit will have been
251	 * copied from the current thread via setup_thread_stack() in
252	 * kernel/fork.c
253	 */
254	thread->cpu_domain = get_domain();
255#endif
256
257	if (likely(!args->fn)) {
258		*childregs = *current_pt_regs();
259		childregs->ARM_r0 = 0;
260		if (stack_start)
261			childregs->ARM_sp = stack_start;
262	} else {
263		memset(childregs, 0, sizeof(struct pt_regs));
264		thread->cpu_context.r4 = (unsigned long)args->fn_arg;
265		thread->cpu_context.r5 = (unsigned long)args->fn;
266		childregs->ARM_cpsr = SVC_MODE;
267	}
268	thread->cpu_context.pc = (unsigned long)ret_from_fork;
269	thread->cpu_context.sp = (unsigned long)childregs;
270
271	clear_ptrace_hw_breakpoint(p);
272
273	if (clone_flags & CLONE_SETTLS)
274		thread->tp_value[0] = tls;
275	thread->tp_value[1] = get_tpuser();
276
277	thread_notify(THREAD_NOTIFY_COPY, thread);
278
279	return 0;
280}
281
282unsigned long __get_wchan(struct task_struct *p)
283{
284	struct stackframe frame;
285	unsigned long stack_page;
286	int count = 0;
287
288	frame.fp = thread_saved_fp(p);
289	frame.sp = thread_saved_sp(p);
290	frame.lr = 0;			/* recovered from the stack */
291	frame.pc = thread_saved_pc(p);
292	stack_page = (unsigned long)task_stack_page(p);
293	do {
294		if (frame.sp < stack_page ||
295		    frame.sp >= stack_page + THREAD_SIZE ||
296		    unwind_frame(&frame) < 0)
297			return 0;
298		if (!in_sched_functions(frame.pc))
299			return frame.pc;
300	} while (count ++ < 16);
301	return 0;
302}
303
304#ifdef CONFIG_MMU
305#ifdef CONFIG_KUSER_HELPERS
306/*
307 * The vectors page is always readable from user space for the
308 * atomic helpers. Insert it into the gate_vma so that it is visible
309 * through ptrace and /proc/<pid>/mem.
310 */
311static struct vm_area_struct gate_vma;
312
313static int __init gate_vma_init(void)
314{
315	vma_init(&gate_vma, NULL);
316	gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
317	gate_vma.vm_start = 0xffff0000;
318	gate_vma.vm_end	= 0xffff0000 + PAGE_SIZE;
319	gate_vma.vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
320	return 0;
321}
322arch_initcall(gate_vma_init);
323
324struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
325{
326	return &gate_vma;
327}
328
329int in_gate_area(struct mm_struct *mm, unsigned long addr)
330{
331	return (addr >= gate_vma.vm_start) && (addr < gate_vma.vm_end);
332}
333
334int in_gate_area_no_mm(unsigned long addr)
335{
336	return in_gate_area(NULL, addr);
337}
338#define is_gate_vma(vma)	((vma) == &gate_vma)
339#else
340#define is_gate_vma(vma)	0
341#endif
342
343const char *arch_vma_name(struct vm_area_struct *vma)
344{
345	return is_gate_vma(vma) ? "[vectors]" : NULL;
346}
347
348/* If possible, provide a placement hint at a random offset from the
349 * stack for the sigpage and vdso pages.
350 */
351static unsigned long sigpage_addr(const struct mm_struct *mm,
352				  unsigned int npages)
353{
354	unsigned long offset;
355	unsigned long first;
356	unsigned long last;
357	unsigned long addr;
358	unsigned int slots;
359
360	first = PAGE_ALIGN(mm->start_stack);
361
362	last = TASK_SIZE - (npages << PAGE_SHIFT);
363
364	/* No room after stack? */
365	if (first > last)
366		return 0;
367
368	/* Just enough room? */
369	if (first == last)
370		return first;
371
372	slots = ((last - first) >> PAGE_SHIFT) + 1;
373
374	offset = get_random_u32_below(slots);
375
376	addr = first + (offset << PAGE_SHIFT);
377
378	return addr;
379}
380
381static struct page *signal_page;
382extern struct page *get_signal_page(void);
383
384static int sigpage_mremap(const struct vm_special_mapping *sm,
385		struct vm_area_struct *new_vma)
386{
387	current->mm->context.sigpage = new_vma->vm_start;
388	return 0;
389}
390
391static const struct vm_special_mapping sigpage_mapping = {
392	.name = "[sigpage]",
393	.pages = &signal_page,
394	.mremap = sigpage_mremap,
395};
396
397int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
398{
399	struct mm_struct *mm = current->mm;
400	struct vm_area_struct *vma;
401	unsigned long npages;
402	unsigned long addr;
403	unsigned long hint;
404	int ret = 0;
405
406	if (!signal_page)
407		signal_page = get_signal_page();
408	if (!signal_page)
409		return -ENOMEM;
410
411	npages = 1; /* for sigpage */
412	npages += vdso_total_pages;
413
414	if (mmap_write_lock_killable(mm))
415		return -EINTR;
416	hint = sigpage_addr(mm, npages);
417	addr = get_unmapped_area(NULL, hint, npages << PAGE_SHIFT, 0, 0);
418	if (IS_ERR_VALUE(addr)) {
419		ret = addr;
420		goto up_fail;
421	}
422
423	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
424		VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
425		&sigpage_mapping);
426
427	if (IS_ERR(vma)) {
428		ret = PTR_ERR(vma);
429		goto up_fail;
430	}
431
432	mm->context.sigpage = addr;
433
434	/* Unlike the sigpage, failure to install the vdso is unlikely
435	 * to be fatal to the process, so no error check needed
436	 * here.
437	 */
438	arm_install_vdso(mm, addr + PAGE_SIZE);
439
440 up_fail:
441	mmap_write_unlock(mm);
442	return ret;
443}
444#endif