v3.1
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 * Licensed under the GPL
 */

#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/tick.h>
#include <linux/threads.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include "as-layout.h"
#include "kern_util.h"
#include "os.h"
#include "skas.h"
#include "tlb.h"

/*
 * This is a per-cpu array.  A processor only modifies its entry and it only
 * cares about its entry, so it's OK if another processor is modifying its
 * entry.
 */
struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };

static inline int external_pid(void)
{
	/* FIXME: Need to look up userspace_pid by cpu */
	return userspace_pid[0];
}

int pid_to_processor_id(int pid)
{
	int i;

	for (i = 0; i < ncpus; i++) {
		if (cpu_tasks[i].pid == pid)
			return i;
	}
	return -1;
}

void free_stack(unsigned long stack, int order)
{
	free_pages(stack, order);
}

unsigned long alloc_stack(int order, int atomic)
{
	unsigned long page;
	gfp_t flags = GFP_KERNEL;

	if (atomic)
		flags = GFP_ATOMIC;
	page = __get_free_pages(flags, order);

	return page;
}

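/*
 * Create a kernel thread: stash fn/arg in the current thread's request
 * block, then fork.  The child picks them up and runs them from
 * new_thread_handler(), via the !forking path of copy_thread() below.
 */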
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
	int pid;

	current->thread.request.u.thread.proc = fn;
	current->thread.request.u.thread.arg = arg;
	pid = do_fork(CLONE_VM | CLONE_UNTRACED | flags, 0,
		      &current->thread.regs, 0, NULL, NULL);
	return pid;
}

static inline void set_current(struct task_struct *task)
{
	cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task)
		{ external_pid(), task });
}

extern void arch_switch_to(struct task_struct *to);

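/*
 * Context switch: record the outgoing task in prev_sched and swap the
 * jmp_buf-based host contexts with switch_threads().  If the task we
 * resume left a saved_task behind, keep switching until none remains,
 * then hand prev_sched back for schedule_tail().
 */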
void *_switch_to(void *prev, void *next, void *last)
{
	struct task_struct *from = prev;
	struct task_struct *to = next;

	to->thread.prev_sched = from;
	set_current(to);

	do {
		current->thread.saved_task = NULL;

		switch_threads(&from->thread.switch_buf,
			       &to->thread.switch_buf);

		arch_switch_to(current);

		if (current->thread.saved_task)
			show_regs(&(current->thread.regs));
		to = current->thread.saved_task;
		from = current;
	} while (current->thread.saved_task);

	return current->thread.prev_sched;
}

void interrupt_end(void)
{
	if (need_resched())
		schedule();
	if (test_tsk_thread_flag(current, TIF_SIGPENDING))
		do_signal();
}

void exit_thread(void)
{
}

void *get_current(void)
{
	return current;
}

/*
 * This is called magically, by its address being stuffed in a jmp_buf
 * and being longjmp-d to.
 */
void new_thread_handler(void)
{
	int (*fn)(void *), n;
	void *arg;

	if (current->thread.prev_sched != NULL)
		schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	fn = current->thread.request.u.thread.proc;
	arg = current->thread.request.u.thread.arg;

	/*
	 * The return value is 1 if the kernel thread execs a process,
	 * 0 if it just exits
	 */
	n = run_kernel_thread(fn, arg, &current->thread.exec_buf);
	if (n == 1) {
		/* Handle any immediate reschedules or signals */
		interrupt_end();
		userspace(&current->thread.regs.regs);
	}
	else do_exit(0);
}

/* Called magically, see new_thread_handler above */
void fork_handler(void)
{
	force_flush_all();

	schedule_tail(current->thread.prev_sched);

	/*
	 * XXX: if interrupt_end() calls schedule, this call to
	 * arch_switch_to isn't needed. We may want to do that to
	 * improve performance. -bb
	 */
	arch_switch_to(current);

	current->thread.prev_sched = NULL;

	/* Handle any immediate reschedules or signals */
	interrupt_end();

	userspace(&current->thread.regs.regs);
}

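/*
 * Set up a new task's thread state.  Two cases: a forked process copies
 * the parent's registers and resumes in fork_handler(); a kernel thread
 * inherits the pending fn/arg request and starts in new_thread_handler().
 */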
int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long stack_top, struct task_struct * p,
		struct pt_regs *regs)
{
	void (*handler)(void);
	int ret = 0;

	p->thread = (struct thread_struct) INIT_THREAD;

	if (current->thread.forking) {
		memcpy(&p->thread.regs.regs, &regs->regs,
		       sizeof(p->thread.regs.regs));
		REGS_SET_SYSCALL_RETURN(p->thread.regs.regs.gp, 0);
		if (sp != 0)
			REGS_SP(p->thread.regs.regs.gp) = sp;

		handler = fork_handler;

		arch_copy_thread(&current->thread.arch, &p->thread.arch);
	}
	else {
		get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp);
		p->thread.request.u.thread = current->thread.request.u.thread;
		handler = new_thread_handler;
	}

	new_thread(task_stack_page(p), &p->thread.switch_buf, handler);

	if (current->thread.forking) {
		clear_flushed_tls(p);

		/*
		 * Set a new TLS for the child thread?
		 */
		if (clone_flags & CLONE_SETTLS)
			ret = arch_copy_tls(p);
	}

	return ret;
}

void initial_thread_cb(void (*proc)(void *), void *arg)
{
	int save_kmalloc_ok = kmalloc_ok;

	kmalloc_ok = 0;
	initial_thread_cb_skas(proc, arg);
	kmalloc_ok = save_kmalloc_ok;
}

void default_idle(void)
{
	unsigned long long nsecs;

	while (1) {
		/* endless idle loop with no priority at all */

		/*
		 * although we are an idle CPU, we do not want to
		 * get into the scheduler unnecessarily.
		 */
		if (need_resched())
			schedule();

		tick_nohz_stop_sched_tick(1);
		nsecs = disable_timer();
		idle_sleep(nsecs);
		tick_nohz_restart_sched_tick();
	}
}

void cpu_idle(void)
{
	cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
	default_idle();
}

int __cant_sleep(void) {
	return in_atomic() || irqs_disabled() || in_interrupt();
	/* Is in_interrupt() really needed? */
}

int user_context(unsigned long sp)
{
	unsigned long stack;

	stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
	return stack != (unsigned long) current_thread_info();
}

extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;

void do_uml_exitcalls(void)
{
	exitcall_t *call;

	call = &__uml_exitcall_end;
	while (--call >= &__uml_exitcall_begin)
		(*call)();
}

char *uml_strdup(const char *string)
{
	return kstrdup(string, GFP_KERNEL);
}

int copy_to_user_proc(void __user *to, void *from, int size)
{
	return copy_to_user(to, from, size);
}

int copy_from_user_proc(void *to, void __user *from, int size)
{
	return copy_from_user(to, from, size);
}

int clear_user_proc(void __user *buf, int size)
{
	return clear_user(buf, size);
}

int strlen_user_proc(char __user *str)
{
	return strlen_user(str);
}

int smp_sigio_handler(void)
{
#ifdef CONFIG_SMP
	int cpu = current_thread_info()->cpu;
	IPI_handler(cpu);
	if (cpu != 0)
		return 1;
#endif
	return 0;
}

int cpu(void)
{
	return current_thread_info()->cpu;
}

static atomic_t using_sysemu = ATOMIC_INIT(0);
int sysemu_supported;

void set_using_sysemu(int value)
{
	if (value > sysemu_supported)
		return;
	atomic_set(&using_sysemu, value);
}

int get_using_sysemu(void)
{
	return atomic_read(&using_sysemu);
}

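/*
 * /proc/sysemu: reading shows the current sysemu level; writing a single
 * character '0'-'2' selects it (capped above by sysemu_supported).
 */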
static int sysemu_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", get_using_sysemu());
	return 0;
}

static int sysemu_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, sysemu_proc_show, NULL);
}

static ssize_t sysemu_proc_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *pos)
{
	char tmp[2];

	if (copy_from_user(tmp, buf, 1))
		return -EFAULT;

	if (tmp[0] >= '0' && tmp[0] <= '2')
		set_using_sysemu(tmp[0] - '0');
	/* We use the first char, but pretend to write everything */
	return count;
}

static const struct file_operations sysemu_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= sysemu_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= sysemu_proc_write,
};

int __init make_proc_sysemu(void)
{
	struct proc_dir_entry *ent;

	if (!sysemu_supported)
		return 0;

	ent = proc_create("sysemu", 0600, NULL, &sysemu_proc_fops);

	if (ent == NULL) {
		printk(KERN_WARNING "Failed to register /proc/sysemu\n");
		return 0;
	}

	return 0;
}

late_initcall(make_proc_sysemu);

int singlestepping(void * t)
{
	struct task_struct *task = t ? t : current;

	if (!(task->ptrace & PT_DTRACE))
		return 0;

	if (task->thread.singlestep_syscall)
		return 1;

	return 2;
}

/*
 * Only x86 and x86_64 have an arch_align_stack().
 * All other arches have "#define arch_align_stack(x) (x)"
 * in their asm/system.h
 * As this is included in UML from asm-um/system-generic.h,
 * we can use it to behave as the subarch does.
 */
#ifndef arch_align_stack
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}
#endif

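/*
 * Find the wait channel of a sleeping task by a conservative stack walk:
 * scan the kernel stack word by word, skip everything inside scheduler
 * functions, and report the first kernel text address found above them.
 */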
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack_page, sp, ip;
	bool seen_sched = 0;

	if ((p == NULL) || (p == current) || (p->state == TASK_RUNNING))
		return 0;

	stack_page = (unsigned long) task_stack_page(p);
	/* Bail if the process has no kernel stack for some reason */
	if (stack_page == 0)
		return 0;

	sp = p->thread.switch_buf->JB_SP;
	/*
	 * Bail if the stack pointer is below the bottom of the kernel
	 * stack for some reason
	 */
	if (sp < stack_page)
		return 0;

	while (sp < stack_page + THREAD_SIZE) {
		ip = *((unsigned long *) sp);
		if (in_sched_functions(ip))
			/* Ignore everything until we're above the scheduler */
			seen_sched = 1;
		else if (kernel_text_address(ip) && seen_sched)
			return ip;

		sp += sizeof(unsigned long);
	}

	return 0;
}

int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu)
{
	int cpu = current_thread_info()->cpu;

	return save_fp_registers(userspace_pid[cpu], (unsigned long *) fpu);
}

v3.15
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 * Copyright 2003 PathScale, Inc.
 * Licensed under the GPL
 */

#include <linux/stddef.h>
#include <linux/err.h>
#include <linux/hardirq.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/proc_fs.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/tick.h>
#include <linux/threads.h>
#include <linux/tracehook.h>
#include <asm/current.h>
#include <asm/pgtable.h>
#include <asm/mmu_context.h>
#include <asm/uaccess.h>
#include <as-layout.h>
#include <kern_util.h>
#include <os.h>
#include <skas.h>

/*
 * This is a per-cpu array.  A processor only modifies its entry and it only
 * cares about its entry, so it's OK if another processor is modifying its
 * entry.
 */
struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };

static inline int external_pid(void)
{
	/* FIXME: Need to look up userspace_pid by cpu */
	return userspace_pid[0];
}

int pid_to_processor_id(int pid)
{
	int i;

	for (i = 0; i < ncpus; i++) {
		if (cpu_tasks[i].pid == pid)
			return i;
	}
	return -1;
}

void free_stack(unsigned long stack, int order)
{
	free_pages(stack, order);
}

unsigned long alloc_stack(int order, int atomic)
{
	unsigned long page;
	gfp_t flags = GFP_KERNEL;

	if (atomic)
		flags = GFP_ATOMIC;
	page = __get_free_pages(flags, order);

	return page;
}

static inline void set_current(struct task_struct *task)
{
	cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task)
		{ external_pid(), task });
}

extern void arch_switch_to(struct task_struct *to);

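/*
 * Context switch: record the outgoing task in prev_sched, swap the
 * jmp_buf-based host contexts with switch_threads(), and hand prev_sched
 * back to the scheduler once this thread is resumed.
 */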
void *__switch_to(struct task_struct *from, struct task_struct *to)
{
	to->thread.prev_sched = from;
	set_current(to);

	switch_threads(&from->thread.switch_buf, &to->thread.switch_buf);
	arch_switch_to(current);

	return current->thread.prev_sched;
}

void interrupt_end(void)
{
	if (need_resched())
		schedule();
	if (test_thread_flag(TIF_SIGPENDING))
		do_signal();
	if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
		tracehook_notify_resume(&current->thread.regs);
}

void exit_thread(void)
{
}

int get_current_pid(void)
{
	return task_pid_nr(current);
}

/*
 * This is called magically, by its address being stuffed in a jmp_buf
 * and being longjmp-d to.
 */
void new_thread_handler(void)
{
	int (*fn)(void *), n;
	void *arg;

	if (current->thread.prev_sched != NULL)
		schedule_tail(current->thread.prev_sched);
	current->thread.prev_sched = NULL;

	fn = current->thread.request.u.thread.proc;
	arg = current->thread.request.u.thread.arg;

	/*
	 * callback returns only if the kernel thread execs a process
	 */
	n = fn(arg);
	userspace(&current->thread.regs.regs);
}

/* Called magically, see new_thread_handler above */
void fork_handler(void)
{
	force_flush_all();

	schedule_tail(current->thread.prev_sched);

	/*
	 * XXX: if interrupt_end() calls schedule, this call to
	 * arch_switch_to isn't needed. We may want to do that to
	 * improve performance. -bb
	 */
	arch_switch_to(current);

	current->thread.prev_sched = NULL;

	userspace(&current->thread.regs.regs);
}

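/*
 * Set up a new task's thread state.  A user fork copies the parent's
 * registers and resumes in fork_handler(); for a kernel thread
 * (PF_KTHREAD), sp and arg carry the thread function and its argument,
 * and execution starts in new_thread_handler().
 */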
int copy_thread(unsigned long clone_flags, unsigned long sp,
		unsigned long arg, struct task_struct * p)
{
	void (*handler)(void);
	int kthread = current->flags & PF_KTHREAD;
	int ret = 0;

	p->thread = (struct thread_struct) INIT_THREAD;

	if (!kthread) {
		memcpy(&p->thread.regs.regs, current_pt_regs(),
		       sizeof(p->thread.regs.regs));
		PT_REGS_SET_SYSCALL_RETURN(&p->thread.regs, 0);
		if (sp != 0)
			REGS_SP(p->thread.regs.regs.gp) = sp;

		handler = fork_handler;

		arch_copy_thread(&current->thread.arch, &p->thread.arch);
	} else {
		get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp);
		p->thread.request.u.thread.proc = (int (*)(void *))sp;
		p->thread.request.u.thread.arg = (void *)arg;
		handler = new_thread_handler;
	}

	new_thread(task_stack_page(p), &p->thread.switch_buf, handler);

	if (!kthread) {
		clear_flushed_tls(p);

		/*
		 * Set a new TLS for the child thread?
		 */
		if (clone_flags & CLONE_SETTLS)
			ret = arch_copy_tls(p);
	}

	return ret;
}

void initial_thread_cb(void (*proc)(void *), void *arg)
{
	int save_kmalloc_ok = kmalloc_ok;

	kmalloc_ok = 0;
	initial_thread_cb_skas(proc, arg);
	kmalloc_ok = save_kmalloc_ok;
}

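/*
 * Idle: note the host pid for this CPU, then sleep in the host until the
 * next timer event.  Re-enables interrupts before returning, as the
 * generic idle loop calls this with them disabled.
 */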
void arch_cpu_idle(void)
{
	unsigned long long nsecs;

	cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
	nsecs = disable_timer();
	idle_sleep(nsecs);
	local_irq_enable();
}

int __cant_sleep(void) {
	return in_atomic() || irqs_disabled() || in_interrupt();
	/* Is in_interrupt() really needed? */
}

int user_context(unsigned long sp)
{
	unsigned long stack;

	stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
	return stack != (unsigned long) current_thread_info();
}

extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;

void do_uml_exitcalls(void)
{
	exitcall_t *call;

	call = &__uml_exitcall_end;
	while (--call >= &__uml_exitcall_begin)
		(*call)();
}

char *uml_strdup(const char *string)
{
	return kstrdup(string, GFP_KERNEL);
}
EXPORT_SYMBOL(uml_strdup);

int copy_to_user_proc(void __user *to, void *from, int size)
{
	return copy_to_user(to, from, size);
}

int copy_from_user_proc(void *to, void __user *from, int size)
{
	return copy_from_user(to, from, size);
}

int clear_user_proc(void __user *buf, int size)
{
	return clear_user(buf, size);
}

int strlen_user_proc(char __user *str)
{
	return strlen_user(str);
}

int smp_sigio_handler(void)
{
#ifdef CONFIG_SMP
	int cpu = current_thread_info()->cpu;
	IPI_handler(cpu);
	if (cpu != 0)
		return 1;
#endif
	return 0;
}

int cpu(void)
{
	return current_thread_info()->cpu;
}

static atomic_t using_sysemu = ATOMIC_INIT(0);
int sysemu_supported;

void set_using_sysemu(int value)
{
	if (value > sysemu_supported)
		return;
	atomic_set(&using_sysemu, value);
}

int get_using_sysemu(void)
{
	return atomic_read(&using_sysemu);
}

static int sysemu_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", get_using_sysemu());
	return 0;
}

static int sysemu_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, sysemu_proc_show, NULL);
}

static ssize_t sysemu_proc_write(struct file *file, const char __user *buf,
				 size_t count, loff_t *pos)
{
	char tmp[2];

	if (copy_from_user(tmp, buf, 1))
		return -EFAULT;

	if (tmp[0] >= '0' && tmp[0] <= '2')
		set_using_sysemu(tmp[0] - '0');
	/* We use the first char, but pretend to write everything */
	return count;
}

static const struct file_operations sysemu_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= sysemu_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
	.write		= sysemu_proc_write,
};

int __init make_proc_sysemu(void)
{
	struct proc_dir_entry *ent;

	if (!sysemu_supported)
		return 0;

	ent = proc_create("sysemu", 0600, NULL, &sysemu_proc_fops);

	if (ent == NULL) {
		printk(KERN_WARNING "Failed to register /proc/sysemu\n");
		return 0;
	}

	return 0;
}

late_initcall(make_proc_sysemu);

int singlestepping(void * t)
{
	struct task_struct *task = t ? t : current;

	if (!(task->ptrace & PT_DTRACE))
		return 0;

	if (task->thread.singlestep_syscall)
		return 1;

	return 2;
}

/*
 * Only x86 and x86_64 have an arch_align_stack().
 * All other arches have "#define arch_align_stack(x) (x)"
 * in their asm/exec.h
 * As this is included in UML from asm-um/system-generic.h,
 * we can use it to behave as the subarch does.
 */
#ifndef arch_align_stack
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}
#endif

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long stack_page, sp, ip;
	bool seen_sched = 0;

	if ((p == NULL) || (p == current) || (p->state == TASK_RUNNING))
		return 0;

	stack_page = (unsigned long) task_stack_page(p);
	/* Bail if the process has no kernel stack for some reason */
	if (stack_page == 0)
		return 0;

	sp = p->thread.switch_buf->JB_SP;
	/*
	 * Bail if the stack pointer is below the bottom of the kernel
	 * stack for some reason
	 */
	if (sp < stack_page)
		return 0;

	while (sp < stack_page + THREAD_SIZE) {
		ip = *((unsigned long *) sp);
		if (in_sched_functions(ip))
			/* Ignore everything until we're above the scheduler */
			seen_sched = 1;
		else if (kernel_text_address(ip) && seen_sched)
			return ip;

		sp += sizeof(unsigned long);
	}

	return 0;
}

int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu)
{
	int cpu = current_thread_info()->cpu;

	return save_fp_registers(userspace_pid[cpu], (unsigned long *) fpu);
}