/*
 * Copyright (C) 2015 Anton Ivanov (aivanov@{brocade.com,kot-begemot.co.uk})
 * Copyright (C) 2015 Thomas Meyer (thomas@m3y3r.de)
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 * Licensed under the GPL
 */

#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/tick.h>
#include <linux/threads.h>
#include <linux/tracehook.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <linux/uaccess.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
#include <skas.h>
#include <timer-internal.h>

/*
 * This is a per-cpu array.  A processor only modifies its entry and it only
 * cares about its entry, so it's OK if another processor is modifying its
 * entry.
 */
struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };

static inline int external_pid(void)
{
	/* FIXME: Need to look up userspace_pid by cpu */
	return userspace_pid[0];
}

int pid_to_processor_id(int pid)
{
	int i;

	for (i = 0; i < ncpus; i++) {
		if (cpu_tasks[i].pid == pid)
			return i;
	}
	return -1;
}

void free_stack(unsigned long stack, int order)
{
	free_pages(stack, order);
}

unsigned long alloc_stack(int order, int atomic)
{
	unsigned long page;
	gfp_t flags = GFP_KERNEL;

	if (atomic)
		flags = GFP_ATOMIC;
	page = __get_free_pages(flags, order);

	return page;
}

static inline void set_current(struct task_struct *task)
{
	cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task)
		{ external_pid(), task });
}

extern void arch_switch_to(struct task_struct *to);

void *__switch_to(struct task_struct *from, struct task_struct *to)
{
	to->thread.prev_sched = from;
	set_current(to);

	switch_threads(&from->thread.switch_buf, &to->thread.switch_buf);
	arch_switch_to(current);

	return current->thread.prev_sched;
}

void interrupt_end(void)
{
	struct pt_regs *regs = &current->thread.regs;

	if (need_resched())
		schedule();
	if (test_thread_flag(TIF_SIGPENDING))
		do_signal(regs);
	if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
		tracehook_notify_resume(regs);
}

int get_current_pid(void)
{
	return task_pid_nr(current);
}

/*
 * This is called magically, by its address being stuffed in a jmp_buf
 * and being longjmp-d to.
 */
void new_thread_handler(void)
{
	int (*fn)(void *), n;
	void *arg;

	if (current->thread.prev_sched != NULL)
		schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	fn = current->thread.request.u.thread.proc;
	arg = current->thread.request.u.thread.arg;

	/*
	 * callback returns only if the kernel thread execs a process
	 */
	n = fn(arg);
	userspace(&current->thread.regs.regs, current_thread_info()->aux_fp_regs);
}
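
/*
 * For the curious: the "stuffing" happens on the os-Linux side.  A rough
 * sketch of how the jmp_buf is seeded so the first longjmp lands in the
 * handler on a fresh stack (not the exact helper; see new_thread() under
 * arch/um/os-Linux/):
 *
 *	void new_thread(void *stack, jmp_buf *buf, void (*handler)(void))
 *	{
 *		(*buf)[0].JB_IP = (unsigned long) handler;
 *		(*buf)[0].JB_SP = (unsigned long) stack + UM_THREAD_SIZE
 *				  - sizeof(void *);
 *	}
 */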

/* Called magically, see new_thread_handler above */
void fork_handler(void)
{
	force_flush_all();

	schedule_tail(current->thread.prev_sched);

	/*
	 * XXX: if interrupt_end() calls schedule, this call to
	 * arch_switch_to isn't needed.  We may want to exploit that to
	 * improve performance. -bb
	 */
	arch_switch_to(current);

	current->thread.prev_sched = NULL;

	userspace(&current->thread.regs.regs, current_thread_info()->aux_fp_regs);
}

int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long arg, struct task_struct *p)
{
	void (*handler)(void);
	int kthread = current->flags & PF_KTHREAD;
	int ret = 0;

	p->thread = (struct thread_struct) INIT_THREAD;

	if (!kthread) {
		memcpy(&p->thread.regs.regs, current_pt_regs(),
		       sizeof(p->thread.regs.regs));
		PT_REGS_SET_SYSCALL_RETURN(&p->thread.regs, 0);
		if (sp != 0)
			REGS_SP(p->thread.regs.regs.gp) = sp;

		handler = fork_handler;

		arch_copy_thread(&current->thread.arch, &p->thread.arch);
	} else {
		get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp);
		p->thread.request.u.thread.proc = (int (*)(void *))sp;
		p->thread.request.u.thread.arg = (void *)arg;
		handler = new_thread_handler;
	}

	new_thread(task_stack_page(p), &p->thread.switch_buf, handler);

	if (!kthread) {
		clear_flushed_tls(p);

		/*
		 * Set a new TLS for the child thread?
		 */
		if (clone_flags & CLONE_SETTLS)
			ret = arch_copy_tls(p);
	}

	return ret;
}
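
/*
 * Note how the kernel-thread case above reuses the sp/arg parameters: sp
 * carries the thread function and arg its argument, which is exactly the
 * (proc, arg) pair that new_thread_handler() unpacks from thread.request
 * when the child is first switched to.
 */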

void initial_thread_cb(void (*proc)(void *), void *arg)
{
	int save_kmalloc_ok = kmalloc_ok;

	kmalloc_ok = 0;
	initial_thread_cb_skas(proc, arg);
	kmalloc_ok = save_kmalloc_ok;
}

void arch_cpu_idle(void)
{
	cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
	os_idle_sleep(UM_NSEC_PER_SEC);
	local_irq_enable();
}

int __cant_sleep(void)
{
	return in_atomic() || irqs_disabled() || in_interrupt();
	/* Is in_interrupt() really needed? */
}

int user_context(unsigned long sp)
{
	unsigned long stack;

	stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
	return stack != (unsigned long) current_thread_info();
}
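
/*
 * Example of the masking above, with 4 KiB pages and a stack order of 1
 * (illustrative values): PAGE_MASK << 1 clears the low 13 bits, so any sp
 * inside a two-page kernel stack rounds down to the thread_info at its
 * base, and anything else is a userspace sp.
 */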

extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;

void do_uml_exitcalls(void)
{
	exitcall_t *call;

	call = &__uml_exitcall_end;
	while (--call >= &__uml_exitcall_begin)
		(*call)();
}
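
/*
 * Note the reverse walk: exitcalls run last-registered-first, so later
 * setup is torn down before the things it depends on.
 */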

char *uml_strdup(const char *string)
{
	return kstrdup(string, GFP_KERNEL);
}
EXPORT_SYMBOL(uml_strdup);

int copy_to_user_proc(void __user *to, void *from, int size)
{
	return copy_to_user(to, from, size);
}

int copy_from_user_proc(void *to, void __user *from, int size)
{
	return copy_from_user(to, from, size);
}

int clear_user_proc(void __user *buf, int size)
{
	return clear_user(buf, size);
}

int cpu(void)
{
	return current_thread_info()->cpu;
}

static atomic_t using_sysemu = ATOMIC_INIT(0);
int sysemu_supported;

void set_using_sysemu(int value)
{
	if (value > sysemu_supported)
		return;
	atomic_set(&using_sysemu, value);
}

int get_using_sysemu(void)
{
	return atomic_read(&using_sysemu);
}

static int sysemu_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", get_using_sysemu());
	return 0;
}

static int sysemu_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, sysemu_proc_show, NULL);
}

static ssize_t sysemu_proc_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *pos)
{
	char tmp[2];

	if (copy_from_user(tmp, buf, 1))
		return -EFAULT;

	if (tmp[0] >= '0' && tmp[0] <= '2')
		set_using_sysemu(tmp[0] - '0');
	/* We use the first char, but pretend to write everything */
	return count;
}

static const struct file_operations sysemu_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= sysemu_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= sysemu_proc_write,
};

int __init make_proc_sysemu(void)
{
	struct proc_dir_entry *ent;

	if (!sysemu_supported)
		return 0;

	ent = proc_create("sysemu", 0600, NULL, &sysemu_proc_fops);

	if (ent == NULL) {
		printk(KERN_WARNING "Failed to register /proc/sysemu\n");
		return 0;
	}

	return 0;
}

late_initcall(make_proc_sysemu);
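
/*
 * Example use from inside the UML guest (output illustrative).  Values 0-2
 * select how much of the host's PTRACE_SYSEMU support is used; writes above
 * the supported level or outside '0'-'2' are silently ignored:
 *
 *	# cat /proc/sysemu
 *	2
 *	# echo 0 > /proc/sysemu
 */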

int singlestepping(void *t)
{
	struct task_struct *task = t ? t : current;

	if (!(task->ptrace & PT_DTRACE))
		return 0;

	if (task->thread.singlestep_syscall)
		return 1;

	return 2;
}

/*
 * Only x86 and x86_64 have an arch_align_stack().
 * All other arches have "#define arch_align_stack(x) (x)"
 * in their asm/exec.h
 * As this is included in UML from asm-um/system-generic.h,
 * we can use it to behave as the subarch does.
 */
#ifndef arch_align_stack
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}
#endif
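
/*
 * Worked example (illustrative numbers): for sp == 0x7fff1234 and a random
 * offset of 100, the result is (0x7fff1234 - 100) & ~0xf == 0x7fff11d0,
 * i.e. up to 8 KiB of downward jitter with 16-byte alignment preserved.
 */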

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack_page, sp, ip;
	bool seen_sched = false;

	if ((p == NULL) || (p == current) || (p->state == TASK_RUNNING))
		return 0;

	stack_page = (unsigned long) task_stack_page(p);
	/* Bail if the process has no kernel stack for some reason */
	if (stack_page == 0)
		return 0;

	sp = p->thread.switch_buf->JB_SP;
	/*
	 * Bail if the stack pointer is below the bottom of the kernel
	 * stack for some reason
	 */
	if (sp < stack_page)
		return 0;

	while (sp < stack_page + THREAD_SIZE) {
		ip = *((unsigned long *) sp);
		if (in_sched_functions(ip))
			/* Ignore everything until we're above the scheduler */
			seen_sched = true;
		else if (kernel_text_address(ip) && seen_sched)
			return ip;

		sp += sizeof(unsigned long);
	}

	return 0;
}
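
/*
 * This is a frameless, heuristic walk: every word on the kernel stack is
 * tested, so stack data that merely looks like a text address above the
 * scheduler can be reported as the wait channel, much like get_wchan() on
 * other arches that lack frame pointers.
 */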

int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu)
{
	int cpu = current_thread_info()->cpu;

	return save_i387_registers(userspace_pid[cpu], (unsigned long *) fpu);
}