// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015 Anton Ivanov (aivanov@{brocade.com,kot-begemot.co.uk})
 * Copyright (C) 2015 Thomas Meyer (thomas@m3y3r.de)
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 */

#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/tick.h>
#include <linux/threads.h>
#include <linux/tracehook.h>
#include <asm/current.h>
#include <asm/mmu_context.h>
#include <linux/uaccess.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
#include <skas.h>
#include <linux/time-internal.h>

/*
 * This is a per-cpu array. A processor only modifies its entry and it only
 * cares about its entry, so it's OK if another processor is modifying its
 * entry.
 */
struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };

static inline int external_pid(void)
{
        /* FIXME: Need to look up userspace_pid by cpu */
        return userspace_pid[0];
}

int pid_to_processor_id(int pid)
{
        int i;

        for (i = 0; i < ncpus; i++) {
                if (cpu_tasks[i].pid == pid)
                        return i;
        }
        return -1;
}

void free_stack(unsigned long stack, int order)
{
        free_pages(stack, order);
}

unsigned long alloc_stack(int order, int atomic)
{
        unsigned long page;
        gfp_t flags = GFP_KERNEL;

        if (atomic)
                flags = GFP_ATOMIC;
        page = __get_free_pages(flags, order);

        return page;
}

static inline void set_current(struct task_struct *task)
{
        cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task)
                { external_pid(), task });
}

extern void arch_switch_to(struct task_struct *to);

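/*
 * Switch kernel context between two tasks. A UML context switch is a
 * setjmp/longjmp between the per-task jmp_bufs in thread.switch_buf;
 * prev_sched records the outgoing task so that schedule_tail() can be
 * run on its behalf by whatever code resumes the incoming task.
 */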
void *__switch_to(struct task_struct *from, struct task_struct *to)
{
        to->thread.prev_sched = from;
        set_current(to);

        switch_threads(&from->thread.switch_buf, &to->thread.switch_buf);
        arch_switch_to(current);

        return current->thread.prev_sched;
}

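/*
 * Run on the way back out to userspace after an interrupt or system
 * call: reschedule if needed, then deliver pending signals and any
 * TIF_NOTIFY_RESUME work before the process resumes.
 */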
void interrupt_end(void)
{
        struct pt_regs *regs = &current->thread.regs;

        if (need_resched())
                schedule();
        if (test_thread_flag(TIF_SIGPENDING))
                do_signal(regs);
        if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
                tracehook_notify_resume(regs);
}

int get_current_pid(void)
{
        return task_pid_nr(current);
}

/*
 * This is called magically, by its address being stuffed in a jmp_buf
 * and being longjmp-d to.
 */
void new_thread_handler(void)
{
        int (*fn)(void *), n;
        void *arg;

        if (current->thread.prev_sched != NULL)
                schedule_tail(current->thread.prev_sched);
        current->thread.prev_sched = NULL;

        fn = current->thread.request.u.thread.proc;
        arg = current->thread.request.u.thread.arg;

        /*
         * callback returns only if the kernel thread execs a process
         */
        n = fn(arg);
        userspace(&current->thread.regs.regs, current_thread_info()->aux_fp_regs);
}

/* Called magically, see new_thread_handler above */
void fork_handler(void)
{
        force_flush_all();

        schedule_tail(current->thread.prev_sched);

        /*
         * XXX: if interrupt_end() calls schedule, this call to
         * arch_switch_to isn't needed. We could want to apply this to
         * improve performance. -bb
         */
        arch_switch_to(current);

        current->thread.prev_sched = NULL;

        userspace(&current->thread.regs.regs, current_thread_info()->aux_fp_regs);
}

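/*
 * Set up the child's kernel stack and registers. Kernel threads
 * (PF_KTHREAD) get safe register state and have their entry point and
 * argument stashed in thread.request for new_thread_handler();
 * userspace children get a copy of the parent's registers with the
 * syscall return value forced to 0, which is what fork() returns in
 * the child.
 */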
int copy_thread(unsigned long clone_flags, unsigned long sp,
                unsigned long arg, struct task_struct *p, unsigned long tls)
{
        void (*handler)(void);
        int kthread = current->flags & PF_KTHREAD;
        int ret = 0;

        p->thread = (struct thread_struct) INIT_THREAD;

        if (!kthread) {
                memcpy(&p->thread.regs.regs, current_pt_regs(),
                       sizeof(p->thread.regs.regs));
                PT_REGS_SET_SYSCALL_RETURN(&p->thread.regs, 0);
                if (sp != 0)
                        REGS_SP(p->thread.regs.regs.gp) = sp;

                handler = fork_handler;

                arch_copy_thread(&current->thread.arch, &p->thread.arch);
        } else {
                get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp);
                p->thread.request.u.thread.proc = (int (*)(void *))sp;
                p->thread.request.u.thread.arg = (void *)arg;
                handler = new_thread_handler;
        }

        new_thread(task_stack_page(p), &p->thread.switch_buf, handler);

        if (!kthread) {
                clear_flushed_tls(p);

                /*
                 * Set a new TLS for the child thread?
                 */
                if (clone_flags & CLONE_SETTLS)
                        ret = arch_set_tls(p, tls);
        }

        return ret;
}

void initial_thread_cb(void (*proc)(void *), void *arg)
{
        int save_kmalloc_ok = kmalloc_ok;

        kmalloc_ok = 0;
        initial_thread_cb_skas(proc, arg);
        kmalloc_ok = save_kmalloc_ok;
}

static void um_idle_sleep(void)
{
        unsigned long long duration = UM_NSEC_PER_SEC;

        if (time_travel_mode != TT_MODE_OFF) {
                time_travel_sleep(duration);
        } else {
                os_idle_sleep(duration);
        }
}

void arch_cpu_idle(void)
{
        cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
        um_idle_sleep();
        local_irq_enable();
}

int __cant_sleep(void)
{
        return in_atomic() || irqs_disabled() || in_interrupt();
        /* Is in_interrupt() really needed? */
}

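/*
 * Report whether a stack pointer is outside the current kernel stack.
 * Kernel stacks are 2^CONFIG_KERNEL_STACK_ORDER pages and aligned to
 * their size, so masking sp with PAGE_MASK << CONFIG_KERNEL_STACK_ORDER
 * yields the containing stack's base; for a kernel-mode sp that base
 * is exactly current_thread_info().
 */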
int user_context(unsigned long sp)
{
        unsigned long stack;

        stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
        return stack != (unsigned long) current_thread_info();
}

extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;

void do_uml_exitcalls(void)
{
        exitcall_t *call;

        call = &__uml_exitcall_end;
        while (--call >= &__uml_exitcall_begin)
                (*call)();
}

char *uml_strdup(const char *string)
{
        return kstrdup(string, GFP_KERNEL);
}
EXPORT_SYMBOL(uml_strdup);

int copy_to_user_proc(void __user *to, void *from, int size)
{
        return copy_to_user(to, from, size);
}

int copy_from_user_proc(void *to, void __user *from, int size)
{
        return copy_from_user(to, from, size);
}

int clear_user_proc(void __user *buf, int size)
{
        return clear_user(buf, size);
}

int cpu(void)
{
        return current_thread_info()->cpu;
}

static atomic_t using_sysemu = ATOMIC_INIT(0);
int sysemu_supported;

void set_using_sysemu(int value)
{
        if (value > sysemu_supported)
                return;
        atomic_set(&using_sysemu, value);
}

int get_using_sysemu(void)
{
        return atomic_read(&using_sysemu);
}

static int sysemu_proc_show(struct seq_file *m, void *v)
{
        seq_printf(m, "%d\n", get_using_sysemu());
        return 0;
}

static int sysemu_proc_open(struct inode *inode, struct file *file)
{
        return single_open(file, sysemu_proc_show, NULL);
}

static ssize_t sysemu_proc_write(struct file *file, const char __user *buf,
                                 size_t count, loff_t *pos)
{
        char tmp[2];

        if (copy_from_user(tmp, buf, 1))
                return -EFAULT;

        if (tmp[0] >= '0' && tmp[0] <= '2')
                set_using_sysemu(tmp[0] - '0');
        /* We use the first char, but pretend to write everything */
        return count;
}

static const struct proc_ops sysemu_proc_ops = {
        .proc_open = sysemu_proc_open,
        .proc_read = seq_read,
        .proc_lseek = seq_lseek,
        .proc_release = single_release,
        .proc_write = sysemu_proc_write,
};

int __init make_proc_sysemu(void)
{
        struct proc_dir_entry *ent;

        if (!sysemu_supported)
                return 0;

        ent = proc_create("sysemu", 0600, NULL, &sysemu_proc_ops);

        if (ent == NULL) {
                printk(KERN_WARNING "Failed to register /proc/sysemu\n");
                return 0;
        }

        return 0;
}

late_initcall(make_proc_sysemu);

int singlestepping(void *t)
{
        struct task_struct *task = t ? t : current;

        if (!(task->ptrace & PT_DTRACE))
                return 0;

        if (task->thread.singlestep_syscall)
                return 1;

        return 2;
}

/*
 * Only x86 and x86_64 have an arch_align_stack().
 * All other arches have "#define arch_align_stack(x) (x)"
 * in their asm/exec.h
 * As this is included in UML from asm-um/system-generic.h,
 * we can use it to behave as the subarch does.
 */
#ifndef arch_align_stack
unsigned long arch_align_stack(unsigned long sp)
{
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                sp -= get_random_int() % 8192;
        return sp & ~0xf;
}
#endif

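/*
 * Find where a blocked task is sleeping. Lacking reliable frame
 * pointers, this scans the saved kernel stack word by word starting at
 * the jmp_buf's SP, skips addresses inside scheduler functions, and
 * returns the first kernel text address above them. It is a heuristic:
 * a stale code address left on the stack could in principle match.
 */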
unsigned long get_wchan(struct task_struct *p)
{
        unsigned long stack_page, sp, ip;
        bool seen_sched = false;

        if ((p == NULL) || (p == current) || (p->state == TASK_RUNNING))
                return 0;

        stack_page = (unsigned long) task_stack_page(p);
        /* Bail if the process has no kernel stack for some reason */
        if (stack_page == 0)
                return 0;

        sp = p->thread.switch_buf->JB_SP;
        /*
         * Bail if the stack pointer is below the bottom of the kernel
         * stack for some reason
         */
        if (sp < stack_page)
                return 0;

        while (sp < stack_page + THREAD_SIZE) {
                ip = *((unsigned long *) sp);
                if (in_sched_functions(ip))
                        /* Ignore everything until we're above the scheduler */
                        seen_sched = true;
                else if (kernel_text_address(ip) && seen_sched)
                        return ip;

                sp += sizeof(unsigned long);
        }

        return 0;
}

int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu)
{
        int cpu = current_thread_info()->cpu;

        return save_i387_registers(userspace_pid[cpu], (unsigned long *) fpu);
}

// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015 Anton Ivanov (aivanov@{brocade.com,kot-begemot.co.uk})
 * Copyright (C) 2015 Thomas Meyer (thomas@m3y3r.de)
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 */

#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/tick.h>
#include <linux/threads.h>
#include <linux/resume_user_mode.h>
#include <asm/current.h>
#include <asm/mmu_context.h>
#include <asm/switch_to.h>
#include <asm/exec.h>
#include <linux/uaccess.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
#include <skas.h>
#include <registers.h>
#include <linux/time-internal.h>
#include <linux/elfcore.h>

/*
 * This is a per-cpu array. A processor only modifies its entry and it only
 * cares about its entry, so it's OK if another processor is modifying its
 * entry.
 */
struct task_struct *cpu_tasks[NR_CPUS];
EXPORT_SYMBOL(cpu_tasks);

void free_stack(unsigned long stack, int order)
{
        free_pages(stack, order);
}

unsigned long alloc_stack(int order, int atomic)
{
        unsigned long page;
        gfp_t flags = GFP_KERNEL;

        if (atomic)
                flags = GFP_ATOMIC;
        page = __get_free_pages(flags, order);

        return page;
}

static inline void set_current(struct task_struct *task)
{
        cpu_tasks[task_thread_info(task)->cpu] = task;
}

struct task_struct *__switch_to(struct task_struct *from, struct task_struct *to)
{
        to->thread.prev_sched = from;
        set_current(to);

        switch_threads(&from->thread.switch_buf, &to->thread.switch_buf);
        arch_switch_to(current);

        return current->thread.prev_sched;
}

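/*
 * Run on the way back out to userspace after an interrupt or system
 * call. Here TIF_NOTIFY_SIGNAL is folded into signal delivery and
 * TIF_NOTIFY_RESUME work is routed through resume_user_mode_work().
 */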
void interrupt_end(void)
{
        struct pt_regs *regs = &current->thread.regs;

        if (need_resched())
                schedule();
        if (test_thread_flag(TIF_SIGPENDING) ||
            test_thread_flag(TIF_NOTIFY_SIGNAL))
                do_signal(regs);
        if (test_thread_flag(TIF_NOTIFY_RESUME))
                resume_user_mode_work(regs);
}

int get_current_pid(void)
{
        return task_pid_nr(current);
}

/*
 * This is called magically, by its address being stuffed in a jmp_buf
 * and being longjmp-d to.
 */
void new_thread_handler(void)
{
        int (*fn)(void *);
        void *arg;

        if (current->thread.prev_sched != NULL)
                schedule_tail(current->thread.prev_sched);
        current->thread.prev_sched = NULL;

        fn = current->thread.request.thread.proc;
        arg = current->thread.request.thread.arg;

        /*
         * callback returns only if the kernel thread execs a process
         */
        fn(arg);
        userspace(&current->thread.regs.regs);
}

/* Called magically, see new_thread_handler above */
static void fork_handler(void)
{
        schedule_tail(current->thread.prev_sched);

        /*
         * XXX: if interrupt_end() calls schedule, this call to
         * arch_switch_to isn't needed. We could want to apply this to
         * improve performance. -bb
         */
        arch_switch_to(current);

        current->thread.prev_sched = NULL;

        userspace(&current->thread.regs.regs);
}

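/*
 * Set up the child from the consolidated kernel_clone_args: a non-NULL
 * args->fn marks a kernel thread, whose entry point and fn_arg are
 * stashed in thread.request for new_thread_handler(); otherwise the
 * parent's registers are copied and the syscall return value forced
 * to 0 for the child.
 */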
int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
{
        unsigned long clone_flags = args->flags;
        unsigned long sp = args->stack;
        unsigned long tls = args->tls;
        void (*handler)(void);
        int ret = 0;

        p->thread = (struct thread_struct) INIT_THREAD;

        if (!args->fn) {
                memcpy(&p->thread.regs.regs, current_pt_regs(),
                       sizeof(p->thread.regs.regs));
                PT_REGS_SET_SYSCALL_RETURN(&p->thread.regs, 0);
                if (sp != 0)
                        REGS_SP(p->thread.regs.regs.gp) = sp;

                handler = fork_handler;

                arch_copy_thread(&current->thread.arch, &p->thread.arch);
        } else {
                get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp);
                p->thread.request.thread.proc = args->fn;
                p->thread.request.thread.arg = args->fn_arg;
                handler = new_thread_handler;
        }

        new_thread(task_stack_page(p), &p->thread.switch_buf, handler);

        if (!args->fn) {
                clear_flushed_tls(p);

                /*
                 * Set a new TLS for the child thread?
                 */
                if (clone_flags & CLONE_SETTLS)
                        ret = arch_set_tls(p, tls);
        }

        return ret;
}

void initial_thread_cb(void (*proc)(void *), void *arg)
{
        int save_kmalloc_ok = kmalloc_ok;

        kmalloc_ok = 0;
        initial_thread_cb_skas(proc, arg);
        kmalloc_ok = save_kmalloc_ok;
}

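/*
 * On UML the task_struct is dynamically sized to carry FPU state after
 * it, so the copy must cover arch_task_struct_size bytes rather than
 * sizeof(*src).
 */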
int arch_dup_task_struct(struct task_struct *dst,
                         struct task_struct *src)
{
        /* init_task is not dynamically sized (missing FPU state) */
        if (unlikely(src == &init_task)) {
                memcpy(dst, src, sizeof(init_task));
                memset((void *)dst + sizeof(init_task), 0,
                       arch_task_struct_size - sizeof(init_task));
        } else {
                memcpy(dst, src, arch_task_struct_size);
        }

        return 0;
}

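/*
 * Idle by sleeping. Under time-travel mode the sleep is simulated:
 * time_travel_sleep() advances virtual time to the next scheduled
 * event instead of blocking in the host.
 */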
void um_idle_sleep(void)
{
        if (time_travel_mode != TT_MODE_OFF)
                time_travel_sleep();
        else
                os_idle_sleep();
}

void arch_cpu_idle(void)
{
        um_idle_sleep();
}

int __uml_cant_sleep(void)
{
        return in_atomic() || irqs_disabled() || in_interrupt();
        /* Is in_interrupt() really needed? */
}

int user_context(unsigned long sp)
{
        unsigned long stack;

        stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
        return stack != (unsigned long) current_thread_info();
}

extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;

void do_uml_exitcalls(void)
{
        exitcall_t *call;

        call = &__uml_exitcall_end;
        while (--call >= &__uml_exitcall_begin)
                (*call)();
}

char *uml_strdup(const char *string)
{
        return kstrdup(string, GFP_KERNEL);
}
EXPORT_SYMBOL(uml_strdup);

int copy_from_user_proc(void *to, void __user *from, int size)
{
        return copy_from_user(to, from, size);
}

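/*
 * Single-step state is now tracked with the generic TIF_SINGLESTEP
 * thread flag instead of the old PT_DTRACE ptrace bookkeeping.
 */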
int singlestepping(void)
{
        return test_thread_flag(TIF_SINGLESTEP);
}

/*
 * Only x86 and x86_64 have an arch_align_stack().
 * All other arches have "#define arch_align_stack(x) (x)"
 * in their asm/exec.h
 * As this is included in UML from asm-um/system-generic.h,
 * we can use it to behave as the subarch does.
 */
#ifndef arch_align_stack
unsigned long arch_align_stack(unsigned long sp)
{
        if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
                sp -= get_random_u32_below(8192);
        return sp & ~0xf;
}
#endif

unsigned long __get_wchan(struct task_struct *p)
{
        unsigned long stack_page, sp, ip;
        bool seen_sched = false;

        stack_page = (unsigned long) task_stack_page(p);
        /* Bail if the process has no kernel stack for some reason */
        if (stack_page == 0)
                return 0;

        sp = p->thread.switch_buf->JB_SP;
        /*
         * Bail if the stack pointer is below the bottom of the kernel
         * stack for some reason
         */
        if (sp < stack_page)
                return 0;

        while (sp < stack_page + THREAD_SIZE) {
                ip = *((unsigned long *) sp);
                if (in_sched_functions(ip))
                        /* Ignore everything until we're above the scheduler */
                        seen_sched = true;
                else if (kernel_text_address(ip) && seen_sched)
                        return ip;

                sp += sizeof(unsigned long);
        }

        return 0;
}