// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/kernel/process.c
 *
 * Copyright (C) 1996-2000 Russell King - Converted to ARM.
 * Original Copyright (C) 1995 Linus Torvalds
 */
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/elfcore.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/utsname.h>
#include <linux/uaccess.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/leds.h>

#include <asm/processor.h>
#include <asm/thread_notify.h>
#include <asm/stacktrace.h>
#include <asm/system_misc.h>
#include <asm/mach/time.h>
#include <asm/tls.h>
#include <asm/vdso.h>

#include "signal.h"

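/*
 * The task currently running on each CPU, updated on every context switch.
 */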
#if defined(CONFIG_CURRENT_POINTER_IN_TPIDRURO) || defined(CONFIG_SMP)
DEFINE_PER_CPU(struct task_struct *, __entry_task);
#endif

#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK)
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

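/*
 * When the current task pointer is not kept in the TPIDRURO register, it
 * is tracked in this variable instead.
 */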
#ifndef CONFIG_CURRENT_POINTER_IN_TPIDRURO
asmlinkage struct task_struct *__current;
EXPORT_SYMBOL(__current);
#endif

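/*
 * Human-readable names for the CPSR processor mode and instruction set
 * state, indexed by the values decoded from pt_regs in __show_regs().
 */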
static const char *processor_modes[] __maybe_unused = {
  "USER_26", "FIQ_26" , "IRQ_26" , "SVC_26" , "UK4_26" , "UK5_26" , "UK6_26" , "UK7_26" ,
  "UK8_26" , "UK9_26" , "UK10_26", "UK11_26", "UK12_26", "UK13_26", "UK14_26", "UK15_26",
  "USER_32", "FIQ_32" , "IRQ_32" , "SVC_32" , "UK4_32" , "UK5_32" , "MON_32" , "ABT_32" ,
  "UK8_32" , "UK9_32" , "HYP_32", "UND_32" , "UK12_32", "UK13_32", "UK14_32", "SYS_32"
};

static const char *isa_modes[] __maybe_unused = {
  "ARM" , "Thumb" , "Jazelle", "ThumbEE"
};

/*
 * This is our default idle handler.
 */

void (*arm_pm_idle)(void);

/*
 * Called from the core idle loop.
 */

void arch_cpu_idle(void)
{
	if (arm_pm_idle)
		arm_pm_idle();
	else
		cpu_do_idle();
	raw_local_irq_enable();
}

void arch_cpu_idle_prepare(void)
{
	local_fiq_enable();
}

void arch_cpu_idle_enter(void)
{
	ledtrig_cpu(CPU_LED_IDLE_START);
#ifdef CONFIG_PL310_ERRATA_769419
	wmb();
#endif
}

void arch_cpu_idle_exit(void)
{
	ledtrig_cpu(CPU_LED_IDLE_END);
}

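/*
 * Dump allocator information for whatever the saved general purpose
 * registers r0-r12 point at, to aid post-mortem debugging.
 */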
void __show_regs_alloc_free(struct pt_regs *regs)
{
	int i;

	/* check for r0 - r12 only */
	for (i = 0; i < 13; i++) {
		pr_alert("Register r%d information:", i);
		mem_dump_obj((void *)regs->uregs[i]);
	}
}

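/*
 * Print the register state saved in regs: the general purpose registers,
 * CPSR flags, processor mode and ISA state, plus the control, translation
 * table base and domain registers where the CPU has them.
 */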
void __show_regs(struct pt_regs *regs)
{
	unsigned long flags;
	char buf[64];
#ifndef CONFIG_CPU_V7M
	unsigned int domain;
#ifdef CONFIG_CPU_SW_DOMAIN_PAN
	/*
	 * Get the domain register for the parent context. In user
	 * mode, we don't save the DACR, so let's use what it should
	 * be. For other modes, we place it after the pt_regs struct.
	 */
	if (user_mode(regs)) {
		domain = DACR_UACCESS_ENABLE;
	} else {
		domain = to_svc_pt_regs(regs)->dacr;
	}
#else
	domain = get_domain();
#endif
#endif

	show_regs_print_info(KERN_DEFAULT);

	printk("PC is at %pS\n", (void *)instruction_pointer(regs));
	printk("LR is at %pS\n", (void *)regs->ARM_lr);
	printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n",
	       regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr);
	printk("sp : %08lx ip : %08lx fp : %08lx\n",
	       regs->ARM_sp, regs->ARM_ip, regs->ARM_fp);
	printk("r10: %08lx r9 : %08lx r8 : %08lx\n",
	       regs->ARM_r10, regs->ARM_r9,
	       regs->ARM_r8);
	printk("r7 : %08lx r6 : %08lx r5 : %08lx r4 : %08lx\n",
	       regs->ARM_r7, regs->ARM_r6,
	       regs->ARM_r5, regs->ARM_r4);
	printk("r3 : %08lx r2 : %08lx r1 : %08lx r0 : %08lx\n",
	       regs->ARM_r3, regs->ARM_r2,
	       regs->ARM_r1, regs->ARM_r0);

	flags = regs->ARM_cpsr;
	buf[0] = flags & PSR_N_BIT ? 'N' : 'n';
	buf[1] = flags & PSR_Z_BIT ? 'Z' : 'z';
	buf[2] = flags & PSR_C_BIT ? 'C' : 'c';
	buf[3] = flags & PSR_V_BIT ? 'V' : 'v';
	buf[4] = '\0';

#ifndef CONFIG_CPU_V7M
	{
		const char *segment;

		if ((domain & domain_mask(DOMAIN_USER)) ==
		    domain_val(DOMAIN_USER, DOMAIN_NOACCESS))
			segment = "none";
		else
			segment = "user";

		printk("Flags: %s IRQs o%s FIQs o%s Mode %s ISA %s Segment %s\n",
		       buf, interrupts_enabled(regs) ? "n" : "ff",
		       fast_interrupts_enabled(regs) ? "n" : "ff",
		       processor_modes[processor_mode(regs)],
		       isa_modes[isa_mode(regs)], segment);
	}
#else
	printk("xPSR: %08lx\n", regs->ARM_cpsr);
#endif

#ifdef CONFIG_CPU_CP15
	{
		unsigned int ctrl;

		buf[0] = '\0';
#ifdef CONFIG_CPU_CP15_MMU
		{
			unsigned int transbase;
			asm("mrc p15, 0, %0, c2, c0\n\t"
			    : "=r" (transbase));
			snprintf(buf, sizeof(buf), " Table: %08x DAC: %08x",
				 transbase, domain);
		}
#endif
		asm("mrc p15, 0, %0, c1, c0\n" : "=r" (ctrl));

		printk("Control: %08x%s\n", ctrl, buf);
	}
#endif
}

void show_regs(struct pt_regs * regs)
{
	__show_regs(regs);
	dump_backtrace(regs, NULL, KERN_DEFAULT);
}

ATOMIC_NOTIFIER_HEAD(thread_notify_head);

EXPORT_SYMBOL_GPL(thread_notify_head);

/*
 * Free current thread data structures etc..
 */
void exit_thread(struct task_struct *tsk)
{
	thread_notify(THREAD_NOTIFY_EXIT, task_thread_info(tsk));
}

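/*
 * Reset the thread state for a new executable: drop any hardware
 * breakpoints and clear the coprocessor usage, debug and FP state, then
 * flush the TLS registers.
 */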
void flush_thread(void)
{
	struct thread_info *thread = current_thread_info();
	struct task_struct *tsk = current;

	flush_ptrace_hw_breakpoint(tsk);

	memset(thread->used_cp, 0, sizeof(thread->used_cp));
	memset(&tsk->thread.debug, 0, sizeof(struct debug_info));
	memset(&thread->fpstate, 0, sizeof(union fp_state));

	flush_tls();

	thread_notify(THREAD_NOTIFY_FLUSH, thread);
}

asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");

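/*
 * Set up the saved CPU context for a new thread.  User threads start out
 * with a copy of the parent's registers (r0 forced to 0 as the child's
 * return value); kernel threads instead have args->fn and args->fn_arg
 * stashed in r5/r4 for ret_from_fork to invoke.
 */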
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
	unsigned long clone_flags = args->flags;
	unsigned long stack_start = args->stack;
	unsigned long tls = args->tls;
	struct thread_info *thread = task_thread_info(p);
	struct pt_regs *childregs = task_pt_regs(p);

	memset(&thread->cpu_context, 0, sizeof(struct cpu_context_save));

#ifdef CONFIG_CPU_USE_DOMAINS
	/*
	 * Copy the initial value of the domain access control register
	 * from the current thread: thread->addr_limit will have been
	 * copied from the current thread via setup_thread_stack() in
	 * kernel/fork.c
	 */
	thread->cpu_domain = get_domain();
#endif

	if (likely(!args->fn)) {
		*childregs = *current_pt_regs();
		childregs->ARM_r0 = 0;
		if (stack_start)
			childregs->ARM_sp = stack_start;
	} else {
		memset(childregs, 0, sizeof(struct pt_regs));
		thread->cpu_context.r4 = (unsigned long)args->fn_arg;
		thread->cpu_context.r5 = (unsigned long)args->fn;
		childregs->ARM_cpsr = SVC_MODE;
	}
	thread->cpu_context.pc = (unsigned long)ret_from_fork;
	thread->cpu_context.sp = (unsigned long)childregs;

	clear_ptrace_hw_breakpoint(p);

	if (clone_flags & CLONE_SETTLS)
		thread->tp_value[0] = tls;
	thread->tp_value[1] = get_tpuser();

	thread_notify(THREAD_NOTIFY_COPY, thread);

	return 0;
}

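/*
 * Walk the saved call stack of a sleeping task and return the first
 * return address outside the scheduler, i.e. where the task is waiting.
 * Gives up after 16 frames or if the unwind leaves the task's stack.
 */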
unsigned long __get_wchan(struct task_struct *p)
{
	struct stackframe frame;
	unsigned long stack_page;
	int count = 0;

	frame.fp = thread_saved_fp(p);
	frame.sp = thread_saved_sp(p);
	frame.lr = 0;			/* recovered from the stack */
	frame.pc = thread_saved_pc(p);
	stack_page = (unsigned long)task_stack_page(p);
	do {
		if (frame.sp < stack_page ||
		    frame.sp >= stack_page + THREAD_SIZE ||
		    unwind_frame(&frame) < 0)
			return 0;
		if (!in_sched_functions(frame.pc))
			return frame.pc;
	} while (count++ < 16);
	return 0;
}

#ifdef CONFIG_MMU
#ifdef CONFIG_KUSER_HELPERS
/*
 * The vectors page is always readable from user space for the
 * atomic helpers. Insert it into the gate_vma so that it is visible
 * through ptrace and /proc/<pid>/mem.
 */
static struct vm_area_struct gate_vma;

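/*
 * Describe the single vectors page, mapped read-only and executable at
 * 0xffff0000, in the static gate VMA at boot time.
 */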
static int __init gate_vma_init(void)
{
	vma_init(&gate_vma, NULL);
	gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
	gate_vma.vm_start = 0xffff0000;
	gate_vma.vm_end = 0xffff0000 + PAGE_SIZE;
	gate_vma.vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
	return 0;
}
arch_initcall(gate_vma_init);

struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return &gate_vma;
}

int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	return (addr >= gate_vma.vm_start) && (addr < gate_vma.vm_end);
}

int in_gate_area_no_mm(unsigned long addr)
{
	return in_gate_area(NULL, addr);
}
#define is_gate_vma(vma)	((vma) == &gate_vma)
#else
#define is_gate_vma(vma)	0
#endif

const char *arch_vma_name(struct vm_area_struct *vma)
{
	return is_gate_vma(vma) ? "[vectors]" : NULL;
}

/* If possible, provide a placement hint at a random offset from the
 * stack for the sigpage and vdso pages.
 */
static unsigned long sigpage_addr(const struct mm_struct *mm,
				  unsigned int npages)
{
	unsigned long offset;
	unsigned long first;
	unsigned long last;
	unsigned long addr;
	unsigned int slots;

	first = PAGE_ALIGN(mm->start_stack);

	last = TASK_SIZE - (npages << PAGE_SHIFT);

	/* No room after stack? */
	if (first > last)
		return 0;

	/* Just enough room? */
	if (first == last)
		return first;

	slots = ((last - first) >> PAGE_SHIFT) + 1;

	offset = get_random_u32_below(slots);

	addr = first + (offset << PAGE_SHIFT);

	return addr;
}

static struct page *signal_page;
extern struct page *get_signal_page(void);

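/*
 * The signal return trampoline lives in the sigpage.  If userspace moves
 * the mapping with mremap(), record the new address so signal delivery
 * keeps using the right trampoline.
 */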
static int sigpage_mremap(const struct vm_special_mapping *sm,
			  struct vm_area_struct *new_vma)
{
	current->mm->context.sigpage = new_vma->vm_start;
	return 0;
}

static const struct vm_special_mapping sigpage_mapping = {
	.name = "[sigpage]",
	.pages = &signal_page,
	.mremap = sigpage_mremap,
};

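/*
 * Map the sigpage and, directly above it, the vdso pages into a new mm at
 * exec time.  A missing sigpage is fatal; a missing vdso is not.
 */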
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long npages;
	unsigned long addr;
	unsigned long hint;
	int ret = 0;

	if (!signal_page)
		signal_page = get_signal_page();
	if (!signal_page)
		return -ENOMEM;

	npages = 1; /* for sigpage */
	npages += vdso_total_pages;

	if (mmap_write_lock_killable(mm))
		return -EINTR;
	hint = sigpage_addr(mm, npages);
	addr = get_unmapped_area(NULL, hint, npages << PAGE_SHIFT, 0, 0);
	if (IS_ERR_VALUE(addr)) {
		ret = addr;
		goto up_fail;
	}

	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
		VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
		&sigpage_mapping);

	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto up_fail;
	}

	mm->context.sigpage = addr;

	/* Unlike the sigpage, failure to install the vdso is unlikely
	 * to be fatal to the process, so no error check needed
	 * here.
	 */
	arm_install_vdso(mm, addr + PAGE_SIZE);

 up_fail:
	mmap_write_unlock(mm);
	return ret;
}
#endif