/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 * Licensed under the GPL
 */

#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/tick.h>
#include <linux/threads.h>
#include <linux/tracehook.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/uaccess.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
#include <skas.h>

/*
 * This is a per-cpu array. A processor only modifies its entry and it only
 * cares about its entry, so it's OK if another processor is modifying its
 * entry.
 */
struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };

static inline int external_pid(void)
{
	/* FIXME: Need to look up userspace_pid by cpu */
	return userspace_pid[0];
}

int pid_to_processor_id(int pid)
{
	int i;

	for (i = 0; i < ncpus; i++) {
		if (cpu_tasks[i].pid == pid)
			return i;
	}
	return -1;
}

void free_stack(unsigned long stack, int order)
{
	free_pages(stack, order);
}

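/*
 * Allocate a kernel stack of 2^order pages. The atomic flag selects
 * GFP_ATOMIC so that callers running in contexts that cannot sleep
 * (see __cant_sleep() below) avoid a blocking allocation.
 */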
unsigned long alloc_stack(int order, int atomic)
{
	unsigned long page;
	gfp_t flags = GFP_KERNEL;

	if (atomic)
		flags = GFP_ATOMIC;
	page = __get_free_pages(flags, order);

	return page;
}

static inline void set_current(struct task_struct *task)
{
	cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task)
		{ external_pid(), task });
}

extern void arch_switch_to(struct task_struct *to);

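/*
 * UML switches kernel threads in software: switch_threads() saves this
 * thread's context in from->thread.switch_buf and longjmps to
 * to->thread.switch_buf. When a later switch picks this thread up
 * again, execution resumes just after the switch_threads() call below.
 */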
void *__switch_to(struct task_struct *from, struct task_struct *to)
{
	to->thread.prev_sched = from;
	set_current(to);

	switch_threads(&from->thread.switch_buf, &to->thread.switch_buf);
	arch_switch_to(current);

	return current->thread.prev_sched;
}

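/*
 * Deferred work run on the way back out of an interrupt: reschedule if
 * needed, deliver pending signals, and handle TIF_NOTIFY_RESUME
 * notifications.
 */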
void interrupt_end(void)
{
	if (need_resched())
		schedule();
	if (test_thread_flag(TIF_SIGPENDING))
		do_signal();
	if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
		tracehook_notify_resume(&current->thread.regs);
}

void exit_thread(void)
{
}

int get_current_pid(void)
{
	return task_pid_nr(current);
}

/*
 * This is called magically, by its address being stuffed in a jmp_buf
 * and being longjmp-d to.
 */
void new_thread_handler(void)
{
	int (*fn)(void *), n;
	void *arg;

	if (current->thread.prev_sched != NULL)
		schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	fn = current->thread.request.u.thread.proc;
	arg = current->thread.request.u.thread.arg;

	/*
	 * callback returns only if the kernel thread execs a process
	 */
	n = fn(arg);
	userspace(&current->thread.regs.regs);
}

/* Called magically, see new_thread_handler above */
void fork_handler(void)
{
	force_flush_all();

	schedule_tail(current->thread.prev_sched);

	/*
	 * XXX: if interrupt_end() calls schedule, this call to
	 * arch_switch_to isn't needed. We could want to apply this to
	 * improve performance. -bb
	 */
	arch_switch_to(current);

	current->thread.prev_sched = NULL;

	userspace(&current->thread.regs.regs);
}

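/*
 * A userspace child starts from a copy of the parent's registers and
 * resumes in fork_handler(). For a kernel thread (PF_KTHREAD), sp and
 * arg instead carry the thread function and its argument, and the
 * child starts in new_thread_handler().
 */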
int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long arg, struct task_struct *p)
{
	void (*handler)(void);
	int kthread = current->flags & PF_KTHREAD;
	int ret = 0;

	p->thread = (struct thread_struct) INIT_THREAD;

	if (!kthread) {
		memcpy(&p->thread.regs.regs, current_pt_regs(),
		       sizeof(p->thread.regs.regs));
		PT_REGS_SET_SYSCALL_RETURN(&p->thread.regs, 0);
		if (sp != 0)
			REGS_SP(p->thread.regs.regs.gp) = sp;

		handler = fork_handler;

		arch_copy_thread(&current->thread.arch, &p->thread.arch);
	} else {
		get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp);
		p->thread.request.u.thread.proc = (int (*)(void *))sp;
		p->thread.request.u.thread.arg = (void *)arg;
		handler = new_thread_handler;
	}

	new_thread(task_stack_page(p), &p->thread.switch_buf, handler);

	if (!kthread) {
		clear_flushed_tls(p);

		/*
		 * Set a new TLS for the child thread?
		 */
		if (clone_flags & CLONE_SETTLS)
			ret = arch_copy_tls(p);
	}

	return ret;
}

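/*
 * kmalloc_ok is cleared around the callback, presumably because
 * initial_thread_cb_skas() runs proc() in the initial thread's
 * context, where the normal allocation paths are not safe.
 */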
void initial_thread_cb(void (*proc)(void *), void *arg)
{
	int save_kmalloc_ok = kmalloc_ok;

	kmalloc_ok = 0;
	initial_thread_cb_skas(proc, arg);
	kmalloc_ok = save_kmalloc_ok;
}

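/*
 * Idle by sleeping in the host: record the host pid backing this CPU,
 * ask the timer code how long we may sleep, and block in the host for
 * that long.
 */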
void arch_cpu_idle(void)
{
	unsigned long long nsecs;

	cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
	nsecs = disable_timer();
	idle_sleep(nsecs);
	local_irq_enable();
}

int __cant_sleep(void)
{
	return in_atomic() || irqs_disabled() || in_interrupt();
	/* Is in_interrupt() really needed? */
}

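/*
 * Return nonzero if sp lies outside current's kernel stack. Kernel
 * stacks are 2^CONFIG_KERNEL_STACK_ORDER pages, aligned to their size,
 * so masking sp down to that alignment gives the stack's base, which
 * for a kernel-context sp is current's thread_info.
 */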
int user_context(unsigned long sp)
{
	unsigned long stack;

	stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
	return stack != (unsigned long) current_thread_info();
}

extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;

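/*
 * Run the UML-specific exit hooks in reverse registration order;
 * __uml_exitcall_begin and __uml_exitcall_end are linker-provided
 * symbols bounding the exitcall pointer array.
 */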
void do_uml_exitcalls(void)
{
	exitcall_t *call;

	call = &__uml_exitcall_end;
	while (--call >= &__uml_exitcall_begin)
		(*call)();
}

char *uml_strdup(const char *string)
{
	return kstrdup(string, GFP_KERNEL);
}
EXPORT_SYMBOL(uml_strdup);

int copy_to_user_proc(void __user *to, void *from, int size)
{
	return copy_to_user(to, from, size);
}

int copy_from_user_proc(void *to, void __user *from, int size)
{
	return copy_from_user(to, from, size);
}

int clear_user_proc(void __user *buf, int size)
{
	return clear_user(buf, size);
}

int strlen_user_proc(char __user *str)
{
	return strlen_user(str);
}

int smp_sigio_handler(void)
{
#ifdef CONFIG_SMP
	int cpu = current_thread_info()->cpu;

	IPI_handler(cpu);
	if (cpu != 0)
		return 1;
#endif
	return 0;
}

int cpu(void)
{
	return current_thread_info()->cpu;
}

static atomic_t using_sysemu = ATOMIC_INIT(0);
int sysemu_supported;

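/*
 * sysemu is a host ptrace extension that cuts down on syscall
 * interception round trips. The value is the mode in use: 0 = off,
 * 1 = PTRACE_SYSEMU, 2 = PTRACE_SYSEMU_SINGLESTEP (assuming the usual
 * UML convention), clamped to the level the host was probed to
 * support.
 */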
void set_using_sysemu(int value)
{
	if (value > sysemu_supported)
		return;
	atomic_set(&using_sysemu, value);
}

int get_using_sysemu(void)
{
	return atomic_read(&using_sysemu);
}

static int sysemu_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", get_using_sysemu());
	return 0;
}

static int sysemu_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, sysemu_proc_show, NULL);
}

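/*
 * Accept a single digit '0'-'2' from userspace, e.g.
 *
 *	echo 1 > /proc/sysemu
 *
 * Any other input is silently ignored, but the write still reports
 * full success.
 */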
static ssize_t sysemu_proc_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *pos)
{
	char tmp[2];

	if (copy_from_user(tmp, buf, 1))
		return -EFAULT;

	if (tmp[0] >= '0' && tmp[0] <= '2')
		set_using_sysemu(tmp[0] - '0');
	/* We use the first char, but pretend to write everything */
	return count;
}

static const struct file_operations sysemu_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= sysemu_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= sysemu_proc_write,
};

int __init make_proc_sysemu(void)
{
	struct proc_dir_entry *ent;

	if (!sysemu_supported)
		return 0;

	ent = proc_create("sysemu", 0600, NULL, &sysemu_proc_fops);

	if (ent == NULL) {
		printk(KERN_WARNING "Failed to register /proc/sysemu\n");
		return 0;
	}

	return 0;
}

late_initcall(make_proc_sysemu);

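/*
 * Returns 0 if the task is not being single-stepped, 1 if it is being
 * single-stepped and is inside a syscall it must finish first, and 2
 * if it is simply being single-stepped.
 */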
int singlestepping(void *t)
{
	struct task_struct *task = t ? t : current;

	if (!(task->ptrace & PT_DTRACE))
		return 0;

	if (task->thread.singlestep_syscall)
		return 1;

	return 2;
}

/*
 * Only x86 and x86_64 have an arch_align_stack().
 * All other arches have "#define arch_align_stack(x) (x)"
 * in their asm/exec.h
 * As this is included in UML from asm-um/system-generic.h,
 * we can use it to behave as the subarch does.
 */
#ifndef arch_align_stack
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}
#endif

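/*
 * Find the "wait channel": scan the sleeping task's kernel stack word
 * by word, skip the return addresses inside scheduler functions, and
 * report the first kernel text address above them, i.e. the function
 * the task blocked in. This scans raw stack words rather than real
 * frames, so it is a heuristic, but good enough for /proc/<pid>/wchan.
 */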
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack_page, sp, ip;
	bool seen_sched = false;

	if ((p == NULL) || (p == current) || (p->state == TASK_RUNNING))
		return 0;

	stack_page = (unsigned long) task_stack_page(p);
	/* Bail if the process has no kernel stack for some reason */
	if (stack_page == 0)
		return 0;

	sp = p->thread.switch_buf->JB_SP;
	/*
	 * Bail if the stack pointer is below the bottom of the kernel
	 * stack for some reason
	 */
	if (sp < stack_page)
		return 0;

	while (sp < stack_page + THREAD_SIZE) {
		ip = *((unsigned long *) sp);
		if (in_sched_functions(ip))
			/* Ignore everything until we're above the scheduler */
			seen_sched = true;
		else if (kernel_text_address(ip) && seen_sched)
			return ip;

		sp += sizeof(unsigned long);
	}

	return 0;
}

int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu)
{
	int cpu = current_thread_info()->cpu;

	return save_fp_registers(userspace_pid[cpu], (unsigned long *) fpu);
}