v4.10.11

/*
 * Copyright (C) 2015 Anton Ivanov (aivanov@{brocade.com,kot-begemot.co.uk})
 * Copyright (C) 2015 Thomas Meyer (thomas@m3y3r.de)
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 * Licensed under the GPL
 */

#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/tick.h>
#include <linux/threads.h>
#include <linux/tracehook.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <linux/uaccess.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
#include <skas.h>
#include <timer-internal.h>

/*
 * This is a per-cpu array.  A processor only modifies its entry and it only
 * cares about its entry, so it's OK if another processor is modifying its
 * entry.
 */
struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };
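
/*
 * Illustration (not part of the original file): "[0 ... NR_CPUS - 1]" is a
 * GNU C range designator. With a hypothetical NR_CPUS of 2 the initializer
 * expands to the equivalent of:
 *
 *	struct cpu_task cpu_tasks[2] = { { -1, NULL }, { -1, NULL } };
 *
 * i.e. every slot starts out with no host pid (-1) and no task attached.
 */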

static inline int external_pid(void)
{
	/* FIXME: Need to look up userspace_pid by cpu */
	return userspace_pid[0];
}

int pid_to_processor_id(int pid)
{
	int i;

	for (i = 0; i < ncpus; i++) {
		if (cpu_tasks[i].pid == pid)
			return i;
	}
	return -1;
}

void free_stack(unsigned long stack, int order)
{
	free_pages(stack, order);
}

unsigned long alloc_stack(int order, int atomic)
{
	unsigned long page;
	gfp_t flags = GFP_KERNEL;

	if (atomic)
		flags = GFP_ATOMIC;
	page = __get_free_pages(flags, order);

	return page;
}

static inline void set_current(struct task_struct *task)
{
	cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task)
		{ external_pid(), task });
}

extern void arch_switch_to(struct task_struct *to);

void *__switch_to(struct task_struct *from, struct task_struct *to)
{
	to->thread.prev_sched = from;
	set_current(to);

	switch_threads(&from->thread.switch_buf, &to->thread.switch_buf);
	arch_switch_to(current);

	return current->thread.prev_sched;
}
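
/*
 * Sketch (not part of the original file): on UML a context switch is a
 * host-side setjmp/longjmp pair rather than hand-written assembly.
 * switch_threads() lives in arch/um/os-Linux/process.c and is roughly:
 *
 *	void switch_threads(jmp_buf *me, jmp_buf *you)
 *	{
 *		if (UML_SETJMP(me) == 0)	// save outgoing context
 *			UML_LONGJMP(you, 1);	// resume incoming context
 *	}
 *
 * Execution resumes in the incoming thread wherever it last called
 * switch_threads(), or at the handler installed by new_thread() below.
 */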

void interrupt_end(void)
{
	struct pt_regs *regs = &current->thread.regs;

	if (need_resched())
		schedule();
	if (test_thread_flag(TIF_SIGPENDING))
		do_signal(regs);
	if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
		tracehook_notify_resume(regs);
}

int get_current_pid(void)
{
	return task_pid_nr(current);
}

/*
 * This is called magically, by its address being stuffed in a jmp_buf
 * and being longjmp-d to.
 */
void new_thread_handler(void)
{
	int (*fn)(void *), n;
	void *arg;

	if (current->thread.prev_sched != NULL)
		schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	fn = current->thread.request.u.thread.proc;
	arg = current->thread.request.u.thread.arg;

	/*
	 * callback returns only if the kernel thread execs a process
	 */
	n = fn(arg);
	userspace(&current->thread.regs.regs);
}
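
/*
 * Sketch (not part of the original file): the "magic" is arranged by
 * new_thread(), which seeds a jmp_buf with the handler as the saved
 * instruction pointer and the top of the kernel stack as the saved stack
 * pointer, roughly (see arch/um/os-Linux/process.c for the real version):
 *
 *	void new_thread(void *stack, jmp_buf *buf, void (*handler)(void))
 *	{
 *		(*buf)[0].JB_IP = (unsigned long) handler;
 *		(*buf)[0].JB_SP = (unsigned long) stack + UM_THREAD_SIZE
 *				  - sizeof(void *);
 *	}
 *
 * The first switch_threads() into that buffer then "returns" into the
 * handler on the new stack.
 */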

/* Called magically, see new_thread_handler above */
void fork_handler(void)
{
	force_flush_all();

	schedule_tail(current->thread.prev_sched);

	/*
	 * XXX: if interrupt_end() calls schedule, this call to
	 * arch_switch_to isn't needed. We could want to apply this to
	 * improve performance. -bb
	 */
	arch_switch_to(current);

	current->thread.prev_sched = NULL;

	userspace(&current->thread.regs.regs);
}
152
153int copy_thread(unsigned long clone_flags, unsigned long sp,
154		unsigned long arg, struct task_struct * p)
155{
156	void (*handler)(void);
157	int kthread = current->flags & PF_KTHREAD;
158	int ret = 0;
159
160	p->thread = (struct thread_struct) INIT_THREAD;
161
162	if (!kthread) {
163	  	memcpy(&p->thread.regs.regs, current_pt_regs(),
164		       sizeof(p->thread.regs.regs));
165		PT_REGS_SET_SYSCALL_RETURN(&p->thread.regs, 0);
166		if (sp != 0)
167			REGS_SP(p->thread.regs.regs.gp) = sp;
168
169		handler = fork_handler;
170
171		arch_copy_thread(&current->thread.arch, &p->thread.arch);
172	} else {
173		get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp);
174		p->thread.request.u.thread.proc = (int (*)(void *))sp;
175		p->thread.request.u.thread.arg = (void *)arg;
176		handler = new_thread_handler;
177	}
178
179	new_thread(task_stack_page(p), &p->thread.switch_buf, handler);
180
181	if (!kthread) {
182		clear_flushed_tls(p);
183
184		/*
185		 * Set a new TLS for the child thread?
186		 */
187		if (clone_flags & CLONE_SETTLS)
188			ret = arch_copy_tls(p);
189	}
190
191	return ret;
192}
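
/*
 * Illustration (not part of the original file): for kernel threads the
 * generic fork code reuses the "sp" and "arg" parameters to carry the
 * thread function and its argument, which is why the kthread branch above
 * casts sp to a function pointer. A caller such as
 *
 *	kernel_thread(my_fn, my_arg, CLONE_FS);	// my_fn/my_arg hypothetical
 *
 * reaches copy_thread() with PF_KTHREAD set, sp == (unsigned long) my_fn
 * and arg == (unsigned long) my_arg, and the child begins life in
 * new_thread_handler(), which calls my_fn(my_arg).
 */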

void initial_thread_cb(void (*proc)(void *), void *arg)
{
	int save_kmalloc_ok = kmalloc_ok;

	kmalloc_ok = 0;
	initial_thread_cb_skas(proc, arg);
	kmalloc_ok = save_kmalloc_ok;
}

void arch_cpu_idle(void)
{
	cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
	os_idle_sleep(UM_NSEC_PER_SEC);
	local_irq_enable();
}

int __cant_sleep(void) {
	return in_atomic() || irqs_disabled() || in_interrupt();
	/* Is in_interrupt() really needed? */
}

int user_context(unsigned long sp)
{
	unsigned long stack;

	stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
	return stack != (unsigned long) current_thread_info();
}
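
/*
 * Worked example (not part of the original file): with 4 KiB pages and
 * CONFIG_KERNEL_STACK_ORDER == 2, PAGE_MASK << 2 clears the low 14 bits,
 * so "stack" is sp rounded down to its 16 KiB kernel-stack base, which is
 * where thread_info sits. If that base is not the current thread_info,
 * sp must point into a userspace stack.
 */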

extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;

void do_uml_exitcalls(void)
{
	exitcall_t *call;

	call = &__uml_exitcall_end;
	while (--call >= &__uml_exitcall_begin)
		(*call)();
}
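
/*
 * Sketch (assumed registration mechanism; see
 * arch/um/include/shared/init.h for the real macro): exitcalls are plain
 * function pointers that the linker gathers into a dedicated section
 * delimited by __uml_exitcall_begin/_end, along the lines of:
 *
 *	#define __uml_exitcall(fn) \
 *		static exitcall_t __exitcall_##fn __uml_exitcall_section = fn
 *
 * do_uml_exitcalls() walks the array backwards, so exitcalls run in
 * reverse registration order, matching the usual init/exit pairing.
 */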

char *uml_strdup(const char *string)
{
	return kstrdup(string, GFP_KERNEL);
}
EXPORT_SYMBOL(uml_strdup);

int copy_to_user_proc(void __user *to, void *from, int size)
{
	return copy_to_user(to, from, size);
}

int copy_from_user_proc(void *to, void __user *from, int size)
{
	return copy_from_user(to, from, size);
}

int clear_user_proc(void __user *buf, int size)
{
	return clear_user(buf, size);
}

int strlen_user_proc(char __user *str)
{
	return strlen_user(str);
}

int cpu(void)
{
	return current_thread_info()->cpu;
}

static atomic_t using_sysemu = ATOMIC_INIT(0);
int sysemu_supported;

void set_using_sysemu(int value)
{
	if (value > sysemu_supported)
		return;
	atomic_set(&using_sysemu, value);
}

int get_using_sysemu(void)
{
	return atomic_read(&using_sysemu);
}

static int sysemu_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", get_using_sysemu());
	return 0;
}

static int sysemu_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, sysemu_proc_show, NULL);
}

static ssize_t sysemu_proc_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *pos)
{
	char tmp[2];

	if (copy_from_user(tmp, buf, 1))
		return -EFAULT;

	if (tmp[0] >= '0' && tmp[0] <= '2')
		set_using_sysemu(tmp[0] - '0');
	/* We use the first char, but pretend to write everything */
	return count;
}
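
/*
 * Usage example (not part of the original file): the accepted values are
 * 0, 1 and 2, selecting how aggressively UML uses the host's PTRACE_SYSEMU
 * support to intercept guest system calls. From the host:
 *
 *	cat /proc/sysemu	# show the current level
 *	echo 2 > /proc/sysemu	# request the highest level, if supported
 *
 * set_using_sysemu() silently ignores requests above sysemu_supported.
 */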

static const struct file_operations sysemu_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= sysemu_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= sysemu_proc_write,
};

int __init make_proc_sysemu(void)
{
	struct proc_dir_entry *ent;
	if (!sysemu_supported)
		return 0;

	ent = proc_create("sysemu", 0600, NULL, &sysemu_proc_fops);

	if (ent == NULL)
	{
		printk(KERN_WARNING "Failed to register /proc/sysemu\n");
		return 0;
	}

	return 0;
}

late_initcall(make_proc_sysemu);

int singlestepping(void * t)
{
	struct task_struct *task = t ? t : current;

	if (!(task->ptrace & PT_DTRACE))
		return 0;

	if (task->thread.singlestep_syscall)
		return 1;

	return 2;
}
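
/*
 * Note (assumed semantics, added commentary): callers treat this as a
 * tri-state; 0 means the task is not being single-stepped at all, and the
 * nonzero values distinguish a step pending across a system call (1) from
 * an ordinary single-step (2).
 */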

/*
 * Only x86 and x86_64 have an arch_align_stack().
 * All other arches have "#define arch_align_stack(x) (x)"
 * in their asm/exec.h
 * As this is included in UML from asm-um/system-generic.h,
 * we can use it to behave as the subarch does.
 */
#ifndef arch_align_stack
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}
#endif
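
/*
 * Worked example (not part of the original file): with randomization on,
 * the stack top drops by up to 8191 bytes and is then 16-byte aligned:
 *
 *	sp = 0x7ffd1000, get_random_int() % 8192 = 0x0abc
 *	  -> 0x7ffd1000 - 0x0abc = 0x7ffd0544
 *	  -> 0x7ffd0544 & ~0xf  = 0x7ffd0540
 *
 * giving 512 possible 16-byte-aligned starting offsets per exec.
 */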

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack_page, sp, ip;
	bool seen_sched = 0;

	if ((p == NULL) || (p == current) || (p->state == TASK_RUNNING))
		return 0;

	stack_page = (unsigned long) task_stack_page(p);
	/* Bail if the process has no kernel stack for some reason */
	if (stack_page == 0)
		return 0;

	sp = p->thread.switch_buf->JB_SP;
	/*
	 * Bail if the stack pointer is below the bottom of the kernel
	 * stack for some reason
	 */
	if (sp < stack_page)
		return 0;

	while (sp < stack_page + THREAD_SIZE) {
		ip = *((unsigned long *) sp);
		if (in_sched_functions(ip))
			/* Ignore everything until we're above the scheduler */
			seen_sched = 1;
		else if (kernel_text_address(ip) && seen_sched)
			return ip;

		sp += sizeof(unsigned long);
	}

	return 0;
}
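
/*
 * Note (added commentary): this is a conservative, frame-pointer-free
 * stack scan. Starting from the saved JB_SP, it treats every word on the
 * sleeping task's kernel stack as a candidate return address, skips
 * addresses inside the scheduler itself, and reports the first remaining
 * kernel-text address as the wait channel shown in /proc/<pid>/wchan.
 */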

int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu)
{
	int cpu = current_thread_info()->cpu;

	return save_i387_registers(userspace_pid[cpu], (unsigned long *) fpu);
}

v5.14.15
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015 Anton Ivanov (aivanov@{brocade.com,kot-begemot.co.uk})
 * Copyright (C) 2015 Thomas Meyer (thomas@m3y3r.de)
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 */

#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/tick.h>
#include <linux/threads.h>
#include <linux/tracehook.h>
#include <asm/current.h>
#include <asm/mmu_context.h>
#include <linux/uaccess.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
#include <skas.h>
#include <linux/time-internal.h>

/*
 * This is a per-cpu array.  A processor only modifies its entry and it only
 * cares about its entry, so it's OK if another processor is modifying its
 * entry.
 */
struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };

static inline int external_pid(void)
{
	/* FIXME: Need to look up userspace_pid by cpu */
	return userspace_pid[0];
}

int pid_to_processor_id(int pid)
{
	int i;

	for (i = 0; i < ncpus; i++) {
		if (cpu_tasks[i].pid == pid)
			return i;
	}
	return -1;
}

void free_stack(unsigned long stack, int order)
{
	free_pages(stack, order);
}

unsigned long alloc_stack(int order, int atomic)
{
	unsigned long page;
	gfp_t flags = GFP_KERNEL;

	if (atomic)
		flags = GFP_ATOMIC;
	page = __get_free_pages(flags, order);

	return page;
}

static inline void set_current(struct task_struct *task)
{
	cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task)
		{ external_pid(), task });
}

extern void arch_switch_to(struct task_struct *to);

void *__switch_to(struct task_struct *from, struct task_struct *to)
{
	to->thread.prev_sched = from;
	set_current(to);

	switch_threads(&from->thread.switch_buf, &to->thread.switch_buf);
	arch_switch_to(current);

	return current->thread.prev_sched;
}

void interrupt_end(void)
{
	struct pt_regs *regs = &current->thread.regs;

	if (need_resched())
		schedule();
	if (test_thread_flag(TIF_SIGPENDING) ||
	    test_thread_flag(TIF_NOTIFY_SIGNAL))
		do_signal(regs);
	if (test_thread_flag(TIF_NOTIFY_RESUME))
		tracehook_notify_resume(regs);
}
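
/*
 * Note (added commentary): relative to the v4.10 version above,
 * TIF_NOTIFY_SIGNAL (a newer flag used by task_work, e.g. io_uring, to
 * interrupt a task) is checked alongside TIF_SIGPENDING, and
 * TIF_NOTIFY_RESUME is only tested here because tracehook_notify_resume()
 * now clears the flag itself.
 */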

int get_current_pid(void)
{
	return task_pid_nr(current);
}

/*
 * This is called magically, by its address being stuffed in a jmp_buf
 * and being longjmp-d to.
 */
void new_thread_handler(void)
{
	int (*fn)(void *), n;
	void *arg;

	if (current->thread.prev_sched != NULL)
		schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	fn = current->thread.request.u.thread.proc;
	arg = current->thread.request.u.thread.arg;

	/*
	 * callback returns only if the kernel thread execs a process
	 */
	n = fn(arg);
	userspace(&current->thread.regs.regs, current_thread_info()->aux_fp_regs);
}

/* Called magically, see new_thread_handler above */
void fork_handler(void)
{
	force_flush_all();

	schedule_tail(current->thread.prev_sched);

	/*
	 * XXX: if interrupt_end() calls schedule, this call to
	 * arch_switch_to isn't needed. We could want to apply this to
	 * improve performance. -bb
	 */
	arch_switch_to(current);

	current->thread.prev_sched = NULL;

	userspace(&current->thread.regs.regs, current_thread_info()->aux_fp_regs);
}

int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long arg, struct task_struct * p, unsigned long tls)
{
	void (*handler)(void);
	int kthread = current->flags & (PF_KTHREAD | PF_IO_WORKER);
	int ret = 0;

	p->thread = (struct thread_struct) INIT_THREAD;

	if (!kthread) {
		memcpy(&p->thread.regs.regs, current_pt_regs(),
		       sizeof(p->thread.regs.regs));
		PT_REGS_SET_SYSCALL_RETURN(&p->thread.regs, 0);
		if (sp != 0)
			REGS_SP(p->thread.regs.regs.gp) = sp;

		handler = fork_handler;

		arch_copy_thread(&current->thread.arch, &p->thread.arch);
	} else {
		get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp);
		p->thread.request.u.thread.proc = (int (*)(void *))sp;
		p->thread.request.u.thread.arg = (void *)arg;
		handler = new_thread_handler;
	}

	new_thread(task_stack_page(p), &p->thread.switch_buf, handler);

	if (!kthread) {
		clear_flushed_tls(p);

		/*
		 * Set a new TLS for the child thread?
		 */
		if (clone_flags & CLONE_SETTLS)
			ret = arch_set_tls(p, tls);
	}

	return ret;
}
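
/*
 * Note (added commentary): newer kernels pass the CLONE_SETTLS value to
 * copy_thread() as an explicit "tls" argument instead of digging it out of
 * the child's registers, so the TLS setup becomes arch_set_tls(p, tls).
 */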

void initial_thread_cb(void (*proc)(void *), void *arg)
{
	int save_kmalloc_ok = kmalloc_ok;

	kmalloc_ok = 0;
	initial_thread_cb_skas(proc, arg);
	kmalloc_ok = save_kmalloc_ok;
}

void um_idle_sleep(void)
{
	if (time_travel_mode != TT_MODE_OFF)
		time_travel_sleep();
	else
		os_idle_sleep();
}
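
/*
 * Note (added commentary): in time-travel mode UML runs against a
 * simulated clock, so idling must advance virtual time through
 * time_travel_sleep() rather than blocking the host; os_idle_sleep() now
 * takes no interval and simply waits for the next event.
 */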

void arch_cpu_idle(void)
{
	cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
	um_idle_sleep();
	raw_local_irq_enable();
}

int __cant_sleep(void) {
	return in_atomic() || irqs_disabled() || in_interrupt();
	/* Is in_interrupt() really needed? */
}

int user_context(unsigned long sp)
{
	unsigned long stack;

	stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
	return stack != (unsigned long) current_thread_info();
}

extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;

void do_uml_exitcalls(void)
{
	exitcall_t *call;

	call = &__uml_exitcall_end;
	while (--call >= &__uml_exitcall_begin)
		(*call)();
}

char *uml_strdup(const char *string)
{
	return kstrdup(string, GFP_KERNEL);
}
EXPORT_SYMBOL(uml_strdup);

int copy_to_user_proc(void __user *to, void *from, int size)
{
	return copy_to_user(to, from, size);
}

int copy_from_user_proc(void *to, void __user *from, int size)
{
	return copy_from_user(to, from, size);
}

int clear_user_proc(void __user *buf, int size)
{
	return clear_user(buf, size);
}

int cpu(void)
{
	return current_thread_info()->cpu;
}

static atomic_t using_sysemu = ATOMIC_INIT(0);
int sysemu_supported;

void set_using_sysemu(int value)
{
	if (value > sysemu_supported)
		return;
	atomic_set(&using_sysemu, value);
}

int get_using_sysemu(void)
{
	return atomic_read(&using_sysemu);
}

static int sysemu_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", get_using_sysemu());
	return 0;
}

static int sysemu_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, sysemu_proc_show, NULL);
}

static ssize_t sysemu_proc_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *pos)
{
	char tmp[2];

	if (copy_from_user(tmp, buf, 1))
		return -EFAULT;

	if (tmp[0] >= '0' && tmp[0] <= '2')
		set_using_sysemu(tmp[0] - '0');
	/* We use the first char, but pretend to write everything */
	return count;
}

static const struct proc_ops sysemu_proc_ops = {
	.proc_open	= sysemu_proc_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
	.proc_write	= sysemu_proc_write,
};
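
/*
 * Note (added commentary): since v5.6, /proc entries register a struct
 * proc_ops instead of a full struct file_operations; it drops .owner and
 * keeps only the hooks procfs actually supports, hence the smaller
 * structure compared with the v4.10 version above.
 */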

int __init make_proc_sysemu(void)
{
	struct proc_dir_entry *ent;
	if (!sysemu_supported)
		return 0;

	ent = proc_create("sysemu", 0600, NULL, &sysemu_proc_ops);

	if (ent == NULL)
	{
		printk(KERN_WARNING "Failed to register /proc/sysemu\n");
		return 0;
	}

	return 0;
}

late_initcall(make_proc_sysemu);

int singlestepping(void * t)
{
	struct task_struct *task = t ? t : current;

	if (!(task->ptrace & PT_DTRACE))
		return 0;

	if (task->thread.singlestep_syscall)
		return 1;

	return 2;
}

/*
 * Only x86 and x86_64 have an arch_align_stack().
 * All other arches have "#define arch_align_stack(x) (x)"
 * in their asm/exec.h
 * As this is included in UML from asm-um/system-generic.h,
 * we can use it to behave as the subarch does.
 */
#ifndef arch_align_stack
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}
#endif

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack_page, sp, ip;
	bool seen_sched = 0;

	if ((p == NULL) || (p == current) || task_is_running(p))
		return 0;

	stack_page = (unsigned long) task_stack_page(p);
	/* Bail if the process has no kernel stack for some reason */
	if (stack_page == 0)
		return 0;

	sp = p->thread.switch_buf->JB_SP;
	/*
	 * Bail if the stack pointer is below the bottom of the kernel
	 * stack for some reason
	 */
	if (sp < stack_page)
		return 0;

	while (sp < stack_page + THREAD_SIZE) {
		ip = *((unsigned long *) sp);
		if (in_sched_functions(ip))
			/* Ignore everything until we're above the scheduler */
			seen_sched = 1;
		else if (kernel_text_address(ip) && seen_sched)
			return ip;

		sp += sizeof(unsigned long);
	}

	return 0;
}

int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu)
{
	int cpu = current_thread_info()->cpu;

	return save_i387_registers(userspace_pid[cpu], (unsigned long *) fpu);
}