v4.17

/*
 * Copyright (C) 2015 Anton Ivanov (aivanov@{brocade.com,kot-begemot.co.uk})
 * Copyright (C) 2015 Thomas Meyer (thomas@m3y3r.de)
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 * Licensed under the GPL
 */

#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/tick.h>
#include <linux/threads.h>
#include <linux/tracehook.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <linux/uaccess.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
#include <skas.h>
#include <timer-internal.h>

/*
 * This is a per-cpu array.  A processor only modifies its entry and it only
 * cares about its entry, so it's OK if another processor is modifying its
 * entry.
 */
struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };

static inline int external_pid(void)
{
	/* FIXME: Need to look up userspace_pid by cpu */
	return userspace_pid[0];
}

int pid_to_processor_id(int pid)
{
	int i;

	for (i = 0; i < ncpus; i++) {
		if (cpu_tasks[i].pid == pid)
			return i;
	}
	return -1;
}

void free_stack(unsigned long stack, int order)
{
	free_pages(stack, order);
}

unsigned long alloc_stack(int order, int atomic)
{
	unsigned long page;
	gfp_t flags = GFP_KERNEL;

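	/* GFP_ATOMIC never sleeps, so it is safe for callers in atomic context. */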
	if (atomic)
		flags = GFP_ATOMIC;
	page = __get_free_pages(flags, order);

	return page;
}

static inline void set_current(struct task_struct *task)
{
	cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task)
		{ external_pid(), task });
}

extern void arch_switch_to(struct task_struct *to);

void *__switch_to(struct task_struct *from, struct task_struct *to)
{
	to->thread.prev_sched = from;
	set_current(to);

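	/*
	 * switch_threads() longjmps to the next thread's switch_buf;
	 * execution resumes here once this thread is switched back in.
	 */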
	switch_threads(&from->thread.switch_buf, &to->thread.switch_buf);
	arch_switch_to(current);

	return current->thread.prev_sched;
}

void interrupt_end(void)
{
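	/*
	 * Finish any pending work (reschedule, signals, notify-resume)
	 * before the process goes back to userspace.
	 */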
	struct pt_regs *regs = &current->thread.regs;

	if (need_resched())
		schedule();
	if (test_thread_flag(TIF_SIGPENDING))
		do_signal(regs);
	if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
		tracehook_notify_resume(regs);
}

int get_current_pid(void)
{
	return task_pid_nr(current);
}

/*
 * This is called magically, by its address being stuffed in a jmp_buf
 * and being longjmp-d to.
 */
void new_thread_handler(void)
{
	int (*fn)(void *), n;
	void *arg;

	if (current->thread.prev_sched != NULL)
		schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	fn = current->thread.request.u.thread.proc;
	arg = current->thread.request.u.thread.arg;

	/*
	 * callback returns only if the kernel thread execs a process
	 */
	n = fn(arg);
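	/* fn() returned because it exec'd: enter userspace, never to return. */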
	userspace(&current->thread.regs.regs, current_thread_info()->aux_fp_regs);
}

/* Called magically, see new_thread_handler above */
void fork_handler(void)
{
	force_flush_all();

	schedule_tail(current->thread.prev_sched);

	/*
	 * XXX: if interrupt_end() calls schedule, this call to
	 * arch_switch_to isn't needed. We could want to apply this to
	 * improve performance. -bb
	 */
	arch_switch_to(current);

	current->thread.prev_sched = NULL;

	userspace(&current->thread.regs.regs, current_thread_info()->aux_fp_regs);
}

int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long arg, struct task_struct * p)
{
	void (*handler)(void);
	int kthread = current->flags & PF_KTHREAD;
	int ret = 0;

	p->thread = (struct thread_struct) INIT_THREAD;

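	/*
	 * A user fork inherits the parent's registers; a kernel thread gets
	 * a clean register set and runs proc(arg) instead.
	 */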
	if (!kthread) {
		memcpy(&p->thread.regs.regs, current_pt_regs(),
		       sizeof(p->thread.regs.regs));
		PT_REGS_SET_SYSCALL_RETURN(&p->thread.regs, 0);
		if (sp != 0)
			REGS_SP(p->thread.regs.regs.gp) = sp;

		handler = fork_handler;

		arch_copy_thread(&current->thread.arch, &p->thread.arch);
	} else {
		get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp);
		p->thread.request.u.thread.proc = (int (*)(void *))sp;
		p->thread.request.u.thread.arg = (void *)arg;
		handler = new_thread_handler;
	}

	new_thread(task_stack_page(p), &p->thread.switch_buf, handler);

	if (!kthread) {
		clear_flushed_tls(p);

		/*
		 * Set a new TLS for the child thread?
		 */
		if (clone_flags & CLONE_SETTLS)
			ret = arch_copy_tls(p);
	}

	return ret;
}

void initial_thread_cb(void (*proc)(void *), void *arg)
{
	int save_kmalloc_ok = kmalloc_ok;

	kmalloc_ok = 0;
	initial_thread_cb_skas(proc, arg);
	kmalloc_ok = save_kmalloc_ok;
}

void arch_cpu_idle(void)
{
	cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
	os_idle_sleep(UM_NSEC_PER_SEC);
	local_irq_enable();
}

int __cant_sleep(void) {
	return in_atomic() || irqs_disabled() || in_interrupt();
	/* Is in_interrupt() really needed? */
}

int user_context(unsigned long sp)
{
	unsigned long stack;

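	/*
	 * Kernel stacks are 2^CONFIG_KERNEL_STACK_ORDER naturally aligned
	 * pages with thread_info at the base, so rounding sp down must land
	 * on current_thread_info() if sp points into the kernel stack.
	 */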
	stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
	return stack != (unsigned long) current_thread_info();
}

extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;

void do_uml_exitcalls(void)
{
	exitcall_t *call;

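	/* Walk the linker-built exitcall table backwards: last registered, first called. */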
	call = &__uml_exitcall_end;
	while (--call >= &__uml_exitcall_begin)
		(*call)();
}

char *uml_strdup(const char *string)
{
	return kstrdup(string, GFP_KERNEL);
}
EXPORT_SYMBOL(uml_strdup);

int copy_to_user_proc(void __user *to, void *from, int size)
{
	return copy_to_user(to, from, size);
}

int copy_from_user_proc(void *to, void __user *from, int size)
{
	return copy_from_user(to, from, size);
}

int clear_user_proc(void __user *buf, int size)
{
	return clear_user(buf, size);
}

int cpu(void)
{
	return current_thread_info()->cpu;
}

static atomic_t using_sysemu = ATOMIC_INIT(0);
int sysemu_supported;

void set_using_sysemu(int value)
{
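	/* Ignore requests for a sysemu level the host ptrace does not support. */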
	if (value > sysemu_supported)
		return;
	atomic_set(&using_sysemu, value);
}

int get_using_sysemu(void)
{
	return atomic_read(&using_sysemu);
}

static int sysemu_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", get_using_sysemu());
	return 0;
}

static int sysemu_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, sysemu_proc_show, NULL);
}

static ssize_t sysemu_proc_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *pos)
{
	char tmp[2];

	if (copy_from_user(tmp, buf, 1))
		return -EFAULT;

	if (tmp[0] >= '0' && tmp[0] <= '2')
		set_using_sysemu(tmp[0] - '0');
	/* We use the first char, but pretend to write everything */
	return count;
}

static const struct file_operations sysemu_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= sysemu_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= sysemu_proc_write,
};

int __init make_proc_sysemu(void)
{
	struct proc_dir_entry *ent;
	if (!sysemu_supported)
		return 0;

	ent = proc_create("sysemu", 0600, NULL, &sysemu_proc_fops);

	if (ent == NULL)
	{
		printk(KERN_WARNING "Failed to register /proc/sysemu\n");
		return 0;
	}

	return 0;
}

late_initcall(make_proc_sysemu);

int singlestepping(void * t)
{
	struct task_struct *task = t ? t : current;

	if (!(task->ptrace & PT_DTRACE))
		return 0;

	if (task->thread.singlestep_syscall)
		return 1;

	return 2;
}

/*
 * Only x86 and x86_64 have an arch_align_stack().
 * All other arches have "#define arch_align_stack(x) (x)"
 * in their asm/exec.h
 * As this is included in UML from asm-um/system-generic.h,
 * we can use it to behave as the subarch does.
 */
#ifndef arch_align_stack
unsigned long arch_align_stack(unsigned long sp)
{
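	/* Drop sp by up to 8 kB of randomness, then align down to 16 bytes. */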
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}
#endif

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack_page, sp, ip;
	bool seen_sched = 0;

	if ((p == NULL) || (p == current) || (p->state == TASK_RUNNING))
		return 0;

	stack_page = (unsigned long) task_stack_page(p);
	/* Bail if the process has no kernel stack for some reason */
	if (stack_page == 0)
		return 0;

	sp = p->thread.switch_buf->JB_SP;
	/*
	 * Bail if the stack pointer is below the bottom of the kernel
	 * stack for some reason
	 */
	if (sp < stack_page)
		return 0;

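	/*
	 * Scan the stack word by word, treating each value as a possible
	 * return address: skip everything up through the scheduler's
	 * frames, then report the first kernel-text address found.
	 */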
	while (sp < stack_page + THREAD_SIZE) {
		ip = *((unsigned long *) sp);
		if (in_sched_functions(ip))
			/* Ignore everything until we're above the scheduler */
			seen_sched = 1;
		else if (kernel_text_address(ip) && seen_sched)
			return ip;

		sp += sizeof(unsigned long);
	}

	return 0;
}

int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu)
{
	int cpu = current_thread_info()->cpu;

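	/*
	 * The FPU state lives in the host process backing this CPU;
	 * save_i387_registers() fetches it from there.
	 */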
	return save_i387_registers(userspace_pid[cpu], (unsigned long *) fpu);
}

v6.13.7
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015 Anton Ivanov (aivanov@{brocade.com,kot-begemot.co.uk})
 * Copyright (C) 2015 Thomas Meyer (thomas@m3y3r.de)
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 */

#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/seq_file.h>
#include <linux/tick.h>
#include <linux/threads.h>
#include <linux/resume_user_mode.h>
#include <asm/current.h>
#include <asm/mmu_context.h>
#include <asm/switch_to.h>
#include <asm/exec.h>
#include <linux/uaccess.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
#include <skas.h>
#include <registers.h>
#include <linux/time-internal.h>
#include <linux/elfcore.h>

/*
 * This is a per-cpu array.  A processor only modifies its entry and it only
 * cares about its entry, so it's OK if another processor is modifying its
 * entry.
 */
struct task_struct *cpu_tasks[NR_CPUS];
EXPORT_SYMBOL(cpu_tasks);

void free_stack(unsigned long stack, int order)
{
	free_pages(stack, order);
}

unsigned long alloc_stack(int order, int atomic)
{
	unsigned long page;
	gfp_t flags = GFP_KERNEL;

	if (atomic)
		flags = GFP_ATOMIC;
	page = __get_free_pages(flags, order);

	return page;
}

static inline void set_current(struct task_struct *task)
{
	cpu_tasks[task_thread_info(task)->cpu] = task;
}

struct task_struct *__switch_to(struct task_struct *from, struct task_struct *to)
{
	to->thread.prev_sched = from;
	set_current(to);

	switch_threads(&from->thread.switch_buf, &to->thread.switch_buf);
	arch_switch_to(current);

	return current->thread.prev_sched;
}

void interrupt_end(void)
{
	struct pt_regs *regs = &current->thread.regs;

	if (need_resched())
		schedule();
	if (test_thread_flag(TIF_SIGPENDING) ||
	    test_thread_flag(TIF_NOTIFY_SIGNAL))
		do_signal(regs);
	if (test_thread_flag(TIF_NOTIFY_RESUME))
		resume_user_mode_work(regs);
}

int get_current_pid(void)
{
	return task_pid_nr(current);
}

/*
 * This is called magically, by its address being stuffed in a jmp_buf
 * and being longjmp-d to.
 */
void new_thread_handler(void)
{
	int (*fn)(void *);
	void *arg;

	if (current->thread.prev_sched != NULL)
		schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	fn = current->thread.request.thread.proc;
	arg = current->thread.request.thread.arg;

	/*
	 * callback returns only if the kernel thread execs a process
	 */
	fn(arg);
	userspace(&current->thread.regs.regs);
}

/* Called magically, see new_thread_handler above */
static void fork_handler(void)
{
	schedule_tail(current->thread.prev_sched);

	/*
	 * XXX: if interrupt_end() calls schedule, this call to
	 * arch_switch_to isn't needed. We could want to apply this to
	 * improve performance. -bb
	 */
	arch_switch_to(current);

	current->thread.prev_sched = NULL;

	userspace(&current->thread.regs.regs);
}

int copy_thread(struct task_struct * p, const struct kernel_clone_args *args)
{
	unsigned long clone_flags = args->flags;
	unsigned long sp = args->stack;
	unsigned long tls = args->tls;
	void (*handler)(void);
	int ret = 0;

	p->thread = (struct thread_struct) INIT_THREAD;

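	/*
	 * args->fn is only set for in-kernel threads; a user fork copies
	 * the parent's registers instead.
	 */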
	if (!args->fn) {
		memcpy(&p->thread.regs.regs, current_pt_regs(),
		       sizeof(p->thread.regs.regs));
		PT_REGS_SET_SYSCALL_RETURN(&p->thread.regs, 0);
		if (sp != 0)
			REGS_SP(p->thread.regs.regs.gp) = sp;

		handler = fork_handler;

		arch_copy_thread(&current->thread.arch, &p->thread.arch);
	} else {
		get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp);
		p->thread.request.thread.proc = args->fn;
		p->thread.request.thread.arg = args->fn_arg;
		handler = new_thread_handler;
	}

	new_thread(task_stack_page(p), &p->thread.switch_buf, handler);

	if (!args->fn) {
		clear_flushed_tls(p);

		/*
		 * Set a new TLS for the child thread?
		 */
		if (clone_flags & CLONE_SETTLS)
			ret = arch_set_tls(p, tls);
	}

	return ret;
}

void initial_thread_cb(void (*proc)(void *), void *arg)
{
	int save_kmalloc_ok = kmalloc_ok;

	kmalloc_ok = 0;
	initial_thread_cb_skas(proc, arg);
	kmalloc_ok = save_kmalloc_ok;
}

int arch_dup_task_struct(struct task_struct *dst,
			 struct task_struct *src)
{
	/* init_task is not dynamically sized (missing FPU state) */
	if (unlikely(src == &init_task)) {
		memcpy(dst, src, sizeof(init_task));
		memset((void *)dst + sizeof(init_task), 0,
		       arch_task_struct_size - sizeof(init_task));
	} else {
		memcpy(dst, src, arch_task_struct_size);
	}

	return 0;
}

void um_idle_sleep(void)
{
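	/* In time-travel mode, advance virtual time instead of sleeping on the host. */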
	if (time_travel_mode != TT_MODE_OFF)
		time_travel_sleep();
	else
		os_idle_sleep();
}

void arch_cpu_idle(void)
{
	um_idle_sleep();
}

int __uml_cant_sleep(void) {
	return in_atomic() || irqs_disabled() || in_interrupt();
	/* Is in_interrupt() really needed? */
}

int user_context(unsigned long sp)
{
	unsigned long stack;

	stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
	return stack != (unsigned long) current_thread_info();
}

extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;

void do_uml_exitcalls(void)
{
	exitcall_t *call;

	call = &__uml_exitcall_end;
	while (--call >= &__uml_exitcall_begin)
		(*call)();
}

char *uml_strdup(const char *string)
{
	return kstrdup(string, GFP_KERNEL);
}
EXPORT_SYMBOL(uml_strdup);

int copy_from_user_proc(void *to, void __user *from, int size)
{
	return copy_from_user(to, from, size);
}

int singlestepping(void)
{
	return test_thread_flag(TIF_SINGLESTEP);
}

/*
 * Only x86 and x86_64 have an arch_align_stack().
 * All other arches have "#define arch_align_stack(x) (x)"
 * in their asm/exec.h
 * As this is included in UML from asm-um/system-generic.h,
 * we can use it to behave as the subarch does.
 */
#ifndef arch_align_stack
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_u32_below(8192);
	return sp & ~0xf;
}
#endif

unsigned long __get_wchan(struct task_struct *p)
{
	unsigned long stack_page, sp, ip;
	bool seen_sched = 0;

	stack_page = (unsigned long) task_stack_page(p);
	/* Bail if the process has no kernel stack for some reason */
	if (stack_page == 0)
		return 0;

	sp = p->thread.switch_buf->JB_SP;
	/*
	 * Bail if the stack pointer is below the bottom of the kernel
	 * stack for some reason
	 */
	if (sp < stack_page)
		return 0;

	while (sp < stack_page + THREAD_SIZE) {
		ip = *((unsigned long *) sp);
		if (in_sched_functions(ip))
			/* Ignore everything until we're above the scheduler */
			seen_sched = 1;
		else if (kernel_text_address(ip) && seen_sched)
			return ip;

		sp += sizeof(unsigned long);
	}

	return 0;
}