arch/um/kernel/process.c (v5.9)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015 Anton Ivanov (aivanov@{brocade.com,kot-begemot.co.uk})
 * Copyright (C) 2015 Thomas Meyer (thomas@m3y3r.de)
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 */

#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/tick.h>
#include <linux/threads.h>
#include <linux/tracehook.h>
#include <asm/current.h>
#include <asm/mmu_context.h>
#include <linux/uaccess.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
#include <skas.h>
#include <linux/time-internal.h>

/*
 * This is a per-cpu array.  A processor only modifies its entry and it only
 * cares about its entry, so it's OK if another processor is modifying its
 * entry.
 */
struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };

static inline int external_pid(void)
{
	/* FIXME: Need to look up userspace_pid by cpu */
	return userspace_pid[0];
}

int pid_to_processor_id(int pid)
{
	int i;

	for (i = 0; i < ncpus; i++) {
		if (cpu_tasks[i].pid == pid)
			return i;
	}
	return -1;
}

void free_stack(unsigned long stack, int order)
{
	free_pages(stack, order);
}

unsigned long alloc_stack(int order, int atomic)
{
	unsigned long page;
	gfp_t flags = GFP_KERNEL;

	if (atomic)
		flags = GFP_ATOMIC;
	page = __get_free_pages(flags, order);

	return page;
}
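
/*
 * Hedged usage sketch (not part of this file): how a caller would pair
 * alloc_stack() with free_stack(). The 'atomic' flag just selects
 * GFP_ATOMIC for contexts that must not sleep, e.g.:
 *
 *	unsigned long stack = alloc_stack(0, __cant_sleep());
 *
 *	if (stack == 0)
 *		return -ENOMEM;
 *	...
 *	free_stack(stack, 0);
 */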

static inline void set_current(struct task_struct *task)
{
	cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task)
		{ external_pid(), task });
}

extern void arch_switch_to(struct task_struct *to);

void *__switch_to(struct task_struct *from, struct task_struct *to)
{
	to->thread.prev_sched = from;
	set_current(to);

	switch_threads(&from->thread.switch_buf, &to->thread.switch_buf);
	arch_switch_to(current);

	return current->thread.prev_sched;
}
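
/*
 * A minimal sketch of the mechanism behind switch_threads(), which is
 * defined on the os-Linux side rather than in this file: each thread's
 * kernel context is a jmp_buf (thread.switch_buf), so a context switch
 * is a setjmp() on the outgoing buffer followed by a longjmp() to the
 * incoming one. This is an illustration of the idea, not the kernel's
 * exact code; UML wraps the calls as UML_SETJMP/UML_LONGJMP:
 *
 *	void switch_threads(jmp_buf *me, jmp_buf *you)
 *	{
 *		if (UML_SETJMP(me) == 0)
 *			UML_LONGJMP(you, 1);
 *	}
 */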

/*
 * Called on the way back out to userspace: give the scheduler, signal
 * delivery and tracehook a chance to act before the process resumes.
 */
void interrupt_end(void)
{
	struct pt_regs *regs = &current->thread.regs;

	if (need_resched())
		schedule();
	if (test_thread_flag(TIF_SIGPENDING))
		do_signal(regs);
	if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
		tracehook_notify_resume(regs);
}

int get_current_pid(void)
{
	return task_pid_nr(current);
}

/*
 * This is called magically, by its address being stuffed in a jmp_buf
 * and being longjmp-d to.
 */
void new_thread_handler(void)
{
	int (*fn)(void *), n;
	void *arg;

	if (current->thread.prev_sched != NULL)
		schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	fn = current->thread.request.u.thread.proc;
	arg = current->thread.request.u.thread.arg;

	/*
	 * callback returns only if the kernel thread execs a process
	 */
	n = fn(arg);
	userspace(&current->thread.regs.regs, current_thread_info()->aux_fp_regs);
}
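
/*
 * Hedged sketch of the "magic": new_thread() (on the os-Linux side)
 * seeds the jmp_buf so that the first switch_threads() into it lands in
 * the handler on the new kernel stack. Roughly, using the JB_IP/JB_SP
 * accessors seen elsewhere in this file (exact names and offsets vary
 * by subarch; this is illustrative, not the kernel's code):
 *
 *	void new_thread(void *stack, jmp_buf *buf, void (*handler)(void))
 *	{
 *		(*buf)[0].JB_IP = (unsigned long) handler;
 *		(*buf)[0].JB_SP = (unsigned long) stack + UM_THREAD_SIZE -
 *				  sizeof(void *);
 *	}
 */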

/* Called magically, see new_thread_handler above */
void fork_handler(void)
{
	force_flush_all();

	schedule_tail(current->thread.prev_sched);

	/*
	 * XXX: if interrupt_end() calls schedule, this call to
	 * arch_switch_to isn't needed. We could want to apply this to
	 * improve performance. -bb
	 */
	arch_switch_to(current);

	current->thread.prev_sched = NULL;

	userspace(&current->thread.regs.regs, current_thread_info()->aux_fp_regs);
}

int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long arg, struct task_struct * p, unsigned long tls)
{
	void (*handler)(void);
	int kthread = current->flags & PF_KTHREAD;
	int ret = 0;

	p->thread = (struct thread_struct) INIT_THREAD;

	if (!kthread) {
		memcpy(&p->thread.regs.regs, current_pt_regs(),
		       sizeof(p->thread.regs.regs));
		PT_REGS_SET_SYSCALL_RETURN(&p->thread.regs, 0);
		if (sp != 0)
			REGS_SP(p->thread.regs.regs.gp) = sp;

		handler = fork_handler;

		arch_copy_thread(&current->thread.arch, &p->thread.arch);
	} else {
		get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp);
		p->thread.request.u.thread.proc = (int (*)(void *))sp;
		p->thread.request.u.thread.arg = (void *)arg;
		handler = new_thread_handler;
	}

	new_thread(task_stack_page(p), &p->thread.switch_buf, handler);

	if (!kthread) {
		clear_flushed_tls(p);

		/*
		 * Set a new TLS for the child thread?
		 */
		if (clone_flags & CLONE_SETTLS)
			ret = arch_set_tls(p, tls);
	}

	return ret;
}
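
/*
 * Note (hedged): for kernel threads the generic fork code passes the
 * thread function in 'sp' and its argument in 'arg', which is why the
 * kthread branch above casts sp to int (*)(void *) and stashes both in
 * thread.request for new_thread_handler() to pick up after the first
 * switch into the child.
 */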

void initial_thread_cb(void (*proc)(void *), void *arg)
{
	int save_kmalloc_ok = kmalloc_ok;

	kmalloc_ok = 0;
	initial_thread_cb_skas(proc, arg);
	kmalloc_ok = save_kmalloc_ok;
}

/*
 * Sleep for roughly a second of idle time; under time-travel mode the
 * "sleep" advances UML's simulated clock instead of blocking in the host.
 */
static void um_idle_sleep(void)
{
	unsigned long long duration = UM_NSEC_PER_SEC;

	if (time_travel_mode != TT_MODE_OFF) {
		time_travel_sleep(duration);
	} else {
		os_idle_sleep(duration);
	}
}

void arch_cpu_idle(void)
{
	cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
	um_idle_sleep();
	local_irq_enable();
}

int __cant_sleep(void) {
	return in_atomic() || irqs_disabled() || in_interrupt();
	/* Is in_interrupt() really needed? */
}

int user_context(unsigned long sp)
{
	unsigned long stack;

	stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
	return stack != (unsigned long) current_thread_info();
}
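
/*
 * Worked example (assuming 4 KiB pages and CONFIG_KERNEL_STACK_ORDER = 2):
 * PAGE_MASK << 2 == ~0x3fffUL, so 'stack' is sp rounded down to the 16 KiB
 * base of a four-page kernel stack; the shift works because the extra low
 * bits of PAGE_MASK simply fall away. If that base is not the current
 * thread_info (which sits at the bottom of the kernel stack here), sp must
 * be a userspace address.
 */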

extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;

void do_uml_exitcalls(void)
{
	exitcall_t *call;

	call = &__uml_exitcall_end;
	while (--call >= &__uml_exitcall_begin)
		(*call)();
}
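
/*
 * Hedged example: entries land between __uml_exitcall_begin and
 * __uml_exitcall_end via the __uml_exitcall() registration macro from
 * UML's shared init.h, along the lines of:
 *
 *	static void remove_tmpdir(void)
 *	{
 *		...
 *	}
 *	__uml_exitcall(remove_tmpdir);
 *
 * do_uml_exitcalls() then runs the calls in reverse registration order.
 */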

char *uml_strdup(const char *string)
{
	return kstrdup(string, GFP_KERNEL);
}
EXPORT_SYMBOL(uml_strdup);

int copy_to_user_proc(void __user *to, void *from, int size)
{
	return copy_to_user(to, from, size);
}

int copy_from_user_proc(void *to, void __user *from, int size)
{
	return copy_from_user(to, from, size);
}

int clear_user_proc(void __user *buf, int size)
{
	return clear_user(buf, size);
}

int cpu(void)
{
	return current_thread_info()->cpu;
}

static atomic_t using_sysemu = ATOMIC_INIT(0);
int sysemu_supported;

void set_using_sysemu(int value)
{
	if (value > sysemu_supported)
		return;
	atomic_set(&using_sysemu, value);
}

int get_using_sysemu(void)
{
	return atomic_read(&using_sysemu);
}

static int sysemu_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", get_using_sysemu());
	return 0;
}

static int sysemu_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, sysemu_proc_show, NULL);
}

static ssize_t sysemu_proc_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *pos)
{
	char tmp[2];

	if (copy_from_user(tmp, buf, 1))
		return -EFAULT;

	if (tmp[0] >= '0' && tmp[0] <= '2')
		set_using_sysemu(tmp[0] - '0');
	/* We use the first char, but pretend to write everything */
	return count;
}

static const struct proc_ops sysemu_proc_ops = {
	.proc_open	= sysemu_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
	.proc_write	= sysemu_proc_write,
};

int __init make_proc_sysemu(void)
{
	struct proc_dir_entry *ent;
	if (!sysemu_supported)
		return 0;

	ent = proc_create("sysemu", 0600, NULL, &sysemu_proc_ops);

	if (ent == NULL)
	{
		printk(KERN_WARNING "Failed to register /proc/sysemu\n");
		return 0;
	}

	return 0;
}

late_initcall(make_proc_sysemu);
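
/*
 * Hedged userspace sketch (not kernel code): selecting the sysemu level
 * by writing a single digit '0'..'2' to /proc/sysemu, matching
 * sysemu_proc_write() above.
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	static int set_sysemu(int level)
 *	{
 *		char c = '0' + level;
 *		int fd = open("/proc/sysemu", O_WRONLY);
 *
 *		if (fd < 0)
 *			return -1;
 *		if (write(fd, &c, 1) != 1) {
 *			close(fd);
 *			return -1;
 *		}
 *		return close(fd);
 *	}
 */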
339
340int singlestepping(void * t)
341{
342	struct task_struct *task = t ? t : current;
343
344	if (!(task->ptrace & PT_DTRACE))
345		return 0;
346
347	if (task->thread.singlestep_syscall)
348		return 1;
349
350	return 2;
351}

/*
 * Only x86 and x86_64 have an arch_align_stack().
 * All other arches have "#define arch_align_stack(x) (x)"
 * in their asm/exec.h
 * As this is included in UML from asm-um/system-generic.h,
 * we can use it to behave as the subarch does.
 */
#ifndef arch_align_stack
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}
#endif
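
/*
 * Worked example: sp is lowered by 0..8191 bytes, then '& ~0xf' clears
 * the low four bits, so the randomized stack top lands on one of 512
 * possible 16-byte-aligned slots below the original sp.
 */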

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack_page, sp, ip;
	bool seen_sched = 0;

	if ((p == NULL) || (p == current) || (p->state == TASK_RUNNING))
		return 0;

	stack_page = (unsigned long) task_stack_page(p);
	/* Bail if the process has no kernel stack for some reason */
	if (stack_page == 0)
		return 0;

	sp = p->thread.switch_buf->JB_SP;
	/*
	 * Bail if the stack pointer is below the bottom of the kernel
	 * stack for some reason
	 */
	if (sp < stack_page)
		return 0;

	while (sp < stack_page + THREAD_SIZE) {
		ip = *((unsigned long *) sp);
		if (in_sched_functions(ip))
			/* Ignore everything until we're above the scheduler */
			seen_sched = 1;
		else if (kernel_text_address(ip) && seen_sched)
			return ip;

		sp += sizeof(unsigned long);
	}

	return 0;
}

int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu)
{
	int cpu = current_thread_info()->cpu;

	return save_i387_registers(userspace_pid[cpu], (unsigned long *) fpu);
}

arch/um/kernel/process.c (v4.6)
 
/*
 * Copyright (C) 2015 Anton Ivanov (aivanov@{brocade.com,kot-begemot.co.uk})
 * Copyright (C) 2015 Thomas Meyer (thomas@m3y3r.de)
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 * Licensed under the GPL
 */

#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/tick.h>
#include <linux/threads.h>
#include <linux/tracehook.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/uaccess.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
#include <skas.h>
#include <timer-internal.h>

/*
 * This is a per-cpu array.  A processor only modifies its entry and it only
 * cares about its entry, so it's OK if another processor is modifying its
 * entry.
 */
struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };

static inline int external_pid(void)
{
	/* FIXME: Need to look up userspace_pid by cpu */
	return userspace_pid[0];
}

int pid_to_processor_id(int pid)
{
	int i;

	for (i = 0; i < ncpus; i++) {
		if (cpu_tasks[i].pid == pid)
			return i;
	}
	return -1;
}

void free_stack(unsigned long stack, int order)
{
	free_pages(stack, order);
}

unsigned long alloc_stack(int order, int atomic)
{
	unsigned long page;
	gfp_t flags = GFP_KERNEL;

	if (atomic)
		flags = GFP_ATOMIC;
	page = __get_free_pages(flags, order);

	return page;
}

static inline void set_current(struct task_struct *task)
{
	cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task)
		{ external_pid(), task });
}

extern void arch_switch_to(struct task_struct *to);

void *__switch_to(struct task_struct *from, struct task_struct *to)
{
	to->thread.prev_sched = from;
	set_current(to);

	switch_threads(&from->thread.switch_buf, &to->thread.switch_buf);
	arch_switch_to(current);

	return current->thread.prev_sched;
}

void interrupt_end(void)
{
	struct pt_regs *regs = &current->thread.regs;

	if (need_resched())
		schedule();
	if (test_thread_flag(TIF_SIGPENDING))
		do_signal(regs);
	if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
		tracehook_notify_resume(regs);
}

void exit_thread(void)
{
}
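
/*
 * Note (hedged): UML has no per-thread state to tear down here, hence the
 * empty hook; later kernels made exit_thread() an optional arch hook, so
 * this stub no longer appears in the v5.9 version of the file above.
 */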

int get_current_pid(void)
{
	return task_pid_nr(current);
}

/*
 * This is called magically, by its address being stuffed in a jmp_buf
 * and being longjmp-d to.
 */
void new_thread_handler(void)
{
	int (*fn)(void *), n;
	void *arg;

	if (current->thread.prev_sched != NULL)
		schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	fn = current->thread.request.u.thread.proc;
	arg = current->thread.request.u.thread.arg;

	/*
	 * callback returns only if the kernel thread execs a process
	 */
	n = fn(arg);
	userspace(&current->thread.regs.regs);
}

/* Called magically, see new_thread_handler above */
void fork_handler(void)
{
	force_flush_all();

	schedule_tail(current->thread.prev_sched);

	/*
	 * XXX: if interrupt_end() calls schedule, this call to
	 * arch_switch_to isn't needed. We could want to apply this to
	 * improve performance. -bb
	 */
	arch_switch_to(current);

	current->thread.prev_sched = NULL;

	userspace(&current->thread.regs.regs);
}

int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long arg, struct task_struct * p)
{
	void (*handler)(void);
	int kthread = current->flags & PF_KTHREAD;
	int ret = 0;

	p->thread = (struct thread_struct) INIT_THREAD;

	if (!kthread) {
		memcpy(&p->thread.regs.regs, current_pt_regs(),
		       sizeof(p->thread.regs.regs));
		PT_REGS_SET_SYSCALL_RETURN(&p->thread.regs, 0);
		if (sp != 0)
			REGS_SP(p->thread.regs.regs.gp) = sp;

		handler = fork_handler;

		arch_copy_thread(&current->thread.arch, &p->thread.arch);
	} else {
		get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp);
		p->thread.request.u.thread.proc = (int (*)(void *))sp;
		p->thread.request.u.thread.arg = (void *)arg;
		handler = new_thread_handler;
	}

	new_thread(task_stack_page(p), &p->thread.switch_buf, handler);

	if (!kthread) {
		clear_flushed_tls(p);

		/*
		 * Set a new TLS for the child thread?
		 */
		if (clone_flags & CLONE_SETTLS)
			ret = arch_copy_tls(p);
	}

	return ret;
}

void initial_thread_cb(void (*proc)(void *), void *arg)
{
	int save_kmalloc_ok = kmalloc_ok;

	kmalloc_ok = 0;
	initial_thread_cb_skas(proc, arg);
	kmalloc_ok = save_kmalloc_ok;
}

void arch_cpu_idle(void)
{
	cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
	os_idle_sleep(UM_NSEC_PER_SEC);
	local_irq_enable();
}

int __cant_sleep(void) {
	return in_atomic() || irqs_disabled() || in_interrupt();
	/* Is in_interrupt() really needed? */
}

int user_context(unsigned long sp)
{
	unsigned long stack;

	stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
	return stack != (unsigned long) current_thread_info();
}

extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;

void do_uml_exitcalls(void)
{
	exitcall_t *call;

	call = &__uml_exitcall_end;
	while (--call >= &__uml_exitcall_begin)
		(*call)();
}

char *uml_strdup(const char *string)
{
	return kstrdup(string, GFP_KERNEL);
}
EXPORT_SYMBOL(uml_strdup);

int copy_to_user_proc(void __user *to, void *from, int size)
{
	return copy_to_user(to, from, size);
}

int copy_from_user_proc(void *to, void __user *from, int size)
{
	return copy_from_user(to, from, size);
}

int clear_user_proc(void __user *buf, int size)
{
	return clear_user(buf, size);
}

int strlen_user_proc(char __user *str)
{
	return strlen_user(str);
}

int cpu(void)
{
	return current_thread_info()->cpu;
}

static atomic_t using_sysemu = ATOMIC_INIT(0);
int sysemu_supported;

void set_using_sysemu(int value)
{
	if (value > sysemu_supported)
		return;
	atomic_set(&using_sysemu, value);
}

int get_using_sysemu(void)
{
	return atomic_read(&using_sysemu);
}

static int sysemu_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", get_using_sysemu());
	return 0;
}

static int sysemu_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, sysemu_proc_show, NULL);
}

static ssize_t sysemu_proc_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *pos)
{
	char tmp[2];

	if (copy_from_user(tmp, buf, 1))
		return -EFAULT;

	if (tmp[0] >= '0' && tmp[0] <= '2')
		set_using_sysemu(tmp[0] - '0');
	/* We use the first char, but pretend to write everything */
	return count;
}

static const struct file_operations sysemu_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= sysemu_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= sysemu_proc_write,
};

int __init make_proc_sysemu(void)
{
	struct proc_dir_entry *ent;
	if (!sysemu_supported)
		return 0;

	ent = proc_create("sysemu", 0600, NULL, &sysemu_proc_fops);

	if (ent == NULL)
	{
		printk(KERN_WARNING "Failed to register /proc/sysemu\n");
		return 0;
	}

	return 0;
}

late_initcall(make_proc_sysemu);

int singlestepping(void * t)
{
	struct task_struct *task = t ? t : current;

	if (!(task->ptrace & PT_DTRACE))
		return 0;

	if (task->thread.singlestep_syscall)
		return 1;

	return 2;
}

/*
 * Only x86 and x86_64 have an arch_align_stack().
 * All other arches have "#define arch_align_stack(x) (x)"
 * in their asm/exec.h
 * As this is included in UML from asm-um/system-generic.h,
 * we can use it to behave as the subarch does.
 */
#ifndef arch_align_stack
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}
#endif

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack_page, sp, ip;
	bool seen_sched = 0;

	if ((p == NULL) || (p == current) || (p->state == TASK_RUNNING))
		return 0;

	stack_page = (unsigned long) task_stack_page(p);
	/* Bail if the process has no kernel stack for some reason */
	if (stack_page == 0)
		return 0;

	sp = p->thread.switch_buf->JB_SP;
	/*
	 * Bail if the stack pointer is below the bottom of the kernel
	 * stack for some reason
	 */
	if (sp < stack_page)
		return 0;

	while (sp < stack_page + THREAD_SIZE) {
		ip = *((unsigned long *) sp);
		if (in_sched_functions(ip))
			/* Ignore everything until we're above the scheduler */
			seen_sched = 1;
		else if (kernel_text_address(ip) && seen_sched)
			return ip;

		sp += sizeof(unsigned long);
	}

	return 0;
}

int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu)
{
	int cpu = current_thread_info()->cpu;

	return save_fp_registers(userspace_pid[cpu], (unsigned long *) fpu);
}