Loading...
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) 2015 Anton Ivanov (aivanov@{brocade.com,kot-begemot.co.uk})
4 * Copyright (C) 2015 Thomas Meyer (thomas@m3y3r.de)
5 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
6 * Copyright 2003 PathScale, Inc.
7 */
8
9#include <linux/stddef.h>
10#include <linux/err.h>
11#include <linux/hardirq.h>
12#include <linux/mm.h>
13#include <linux/module.h>
14#include <linux/personality.h>
15#include <linux/proc_fs.h>
16#include <linux/ptrace.h>
17#include <linux/random.h>
18#include <linux/slab.h>
19#include <linux/sched.h>
20#include <linux/sched/debug.h>
21#include <linux/sched/task.h>
22#include <linux/sched/task_stack.h>
23#include <linux/seq_file.h>
24#include <linux/tick.h>
25#include <linux/threads.h>
26#include <linux/tracehook.h>
27#include <asm/current.h>
28#include <asm/mmu_context.h>
29#include <linux/uaccess.h>
30#include <as-layout.h>
31#include <kern_util.h>
32#include <os.h>
33#include <skas.h>
34#include <linux/time-internal.h>
35
36/*
37 * This is a per-cpu array. A processor only modifies its entry and it only
38 * cares about its entry, so it's OK if another processor is modifying its
39 * entry.
40 */
41struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };
42
43static inline int external_pid(void)
44{
45 /* FIXME: Need to look up userspace_pid by cpu */
46 return userspace_pid[0];
47}
48
49int pid_to_processor_id(int pid)
50{
51 int i;
52
53 for (i = 0; i < ncpus; i++) {
54 if (cpu_tasks[i].pid == pid)
55 return i;
56 }
57 return -1;
58}
59
60void free_stack(unsigned long stack, int order)
61{
62 free_pages(stack, order);
63}
64
65unsigned long alloc_stack(int order, int atomic)
66{
67 unsigned long page;
68 gfp_t flags = GFP_KERNEL;
69
70 if (atomic)
71 flags = GFP_ATOMIC;
72 page = __get_free_pages(flags, order);
73
74 return page;
75}
76
77static inline void set_current(struct task_struct *task)
78{
79 cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task)
80 { external_pid(), task });
81}
82
83extern void arch_switch_to(struct task_struct *to);
84
85void *__switch_to(struct task_struct *from, struct task_struct *to)
86{
87 to->thread.prev_sched = from;
88 set_current(to);
89
90 switch_threads(&from->thread.switch_buf, &to->thread.switch_buf);
91 arch_switch_to(current);
92
93 return current->thread.prev_sched;
94}
95
96void interrupt_end(void)
97{
98 struct pt_regs *regs = ¤t->thread.regs;
99
100 if (need_resched())
101 schedule();
102 if (test_thread_flag(TIF_SIGPENDING))
103 do_signal(regs);
104 if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
105 tracehook_notify_resume(regs);
106}
107
108int get_current_pid(void)
109{
110 return task_pid_nr(current);
111}
112
113/*
114 * This is called magically, by its address being stuffed in a jmp_buf
115 * and being longjmp-d to.
116 */
117void new_thread_handler(void)
118{
119 int (*fn)(void *), n;
120 void *arg;
121
122 if (current->thread.prev_sched != NULL)
123 schedule_tail(current->thread.prev_sched);
124 current->thread.prev_sched = NULL;
125
126 fn = current->thread.request.u.thread.proc;
127 arg = current->thread.request.u.thread.arg;
128
129 /*
130 * callback returns only if the kernel thread execs a process
131 */
132 n = fn(arg);
133 userspace(¤t->thread.regs.regs, current_thread_info()->aux_fp_regs);
134}
135
136/* Called magically, see new_thread_handler above */
137void fork_handler(void)
138{
139 force_flush_all();
140
141 schedule_tail(current->thread.prev_sched);
142
143 /*
144 * XXX: if interrupt_end() calls schedule, this call to
145 * arch_switch_to isn't needed. We could want to apply this to
146 * improve performance. -bb
147 */
148 arch_switch_to(current);
149
150 current->thread.prev_sched = NULL;
151
152 userspace(¤t->thread.regs.regs, current_thread_info()->aux_fp_regs);
153}
154
155int copy_thread(unsigned long clone_flags, unsigned long sp,
156 unsigned long arg, struct task_struct * p, unsigned long tls)
157{
158 void (*handler)(void);
159 int kthread = current->flags & PF_KTHREAD;
160 int ret = 0;
161
162 p->thread = (struct thread_struct) INIT_THREAD;
163
164 if (!kthread) {
165 memcpy(&p->thread.regs.regs, current_pt_regs(),
166 sizeof(p->thread.regs.regs));
167 PT_REGS_SET_SYSCALL_RETURN(&p->thread.regs, 0);
168 if (sp != 0)
169 REGS_SP(p->thread.regs.regs.gp) = sp;
170
171 handler = fork_handler;
172
173 arch_copy_thread(¤t->thread.arch, &p->thread.arch);
174 } else {
175 get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp);
176 p->thread.request.u.thread.proc = (int (*)(void *))sp;
177 p->thread.request.u.thread.arg = (void *)arg;
178 handler = new_thread_handler;
179 }
180
181 new_thread(task_stack_page(p), &p->thread.switch_buf, handler);
182
183 if (!kthread) {
184 clear_flushed_tls(p);
185
186 /*
187 * Set a new TLS for the child thread?
188 */
189 if (clone_flags & CLONE_SETTLS)
190 ret = arch_set_tls(p, tls);
191 }
192
193 return ret;
194}
195
196void initial_thread_cb(void (*proc)(void *), void *arg)
197{
198 int save_kmalloc_ok = kmalloc_ok;
199
200 kmalloc_ok = 0;
201 initial_thread_cb_skas(proc, arg);
202 kmalloc_ok = save_kmalloc_ok;
203}
204
205static void um_idle_sleep(void)
206{
207 unsigned long long duration = UM_NSEC_PER_SEC;
208
209 if (time_travel_mode != TT_MODE_OFF) {
210 time_travel_sleep(duration);
211 } else {
212 os_idle_sleep(duration);
213 }
214}
215
216void arch_cpu_idle(void)
217{
218 cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
219 um_idle_sleep();
220 local_irq_enable();
221}
222
223int __cant_sleep(void) {
224 return in_atomic() || irqs_disabled() || in_interrupt();
225 /* Is in_interrupt() really needed? */
226}
227
228int user_context(unsigned long sp)
229{
230 unsigned long stack;
231
232 stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
233 return stack != (unsigned long) current_thread_info();
234}
235
236extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;
237
238void do_uml_exitcalls(void)
239{
240 exitcall_t *call;
241
242 call = &__uml_exitcall_end;
243 while (--call >= &__uml_exitcall_begin)
244 (*call)();
245}
246
247char *uml_strdup(const char *string)
248{
249 return kstrdup(string, GFP_KERNEL);
250}
251EXPORT_SYMBOL(uml_strdup);
252
253int copy_to_user_proc(void __user *to, void *from, int size)
254{
255 return copy_to_user(to, from, size);
256}
257
258int copy_from_user_proc(void *to, void __user *from, int size)
259{
260 return copy_from_user(to, from, size);
261}
262
263int clear_user_proc(void __user *buf, int size)
264{
265 return clear_user(buf, size);
266}
267
268int cpu(void)
269{
270 return current_thread_info()->cpu;
271}
272
273static atomic_t using_sysemu = ATOMIC_INIT(0);
274int sysemu_supported;
275
276void set_using_sysemu(int value)
277{
278 if (value > sysemu_supported)
279 return;
280 atomic_set(&using_sysemu, value);
281}
282
283int get_using_sysemu(void)
284{
285 return atomic_read(&using_sysemu);
286}
287
288static int sysemu_proc_show(struct seq_file *m, void *v)
289{
290 seq_printf(m, "%d\n", get_using_sysemu());
291 return 0;
292}
293
294static int sysemu_proc_open(struct inode *inode, struct file *file)
295{
296 return single_open(file, sysemu_proc_show, NULL);
297}
298
299static ssize_t sysemu_proc_write(struct file *file, const char __user *buf,
300 size_t count, loff_t *pos)
301{
302 char tmp[2];
303
304 if (copy_from_user(tmp, buf, 1))
305 return -EFAULT;
306
307 if (tmp[0] >= '0' && tmp[0] <= '2')
308 set_using_sysemu(tmp[0] - '0');
309 /* We use the first char, but pretend to write everything */
310 return count;
311}
312
313static const struct proc_ops sysemu_proc_ops = {
314 .proc_open = sysemu_proc_open,
315 .proc_read = seq_read,
316 .proc_lseek = seq_lseek,
317 .proc_release = single_release,
318 .proc_write = sysemu_proc_write,
319};
320
321int __init make_proc_sysemu(void)
322{
323 struct proc_dir_entry *ent;
324 if (!sysemu_supported)
325 return 0;
326
327 ent = proc_create("sysemu", 0600, NULL, &sysemu_proc_ops);
328
329 if (ent == NULL)
330 {
331 printk(KERN_WARNING "Failed to register /proc/sysemu\n");
332 return 0;
333 }
334
335 return 0;
336}
337
338late_initcall(make_proc_sysemu);
339
340int singlestepping(void * t)
341{
342 struct task_struct *task = t ? t : current;
343
344 if (!(task->ptrace & PT_DTRACE))
345 return 0;
346
347 if (task->thread.singlestep_syscall)
348 return 1;
349
350 return 2;
351}
352
353/*
354 * Only x86 and x86_64 have an arch_align_stack().
355 * All other arches have "#define arch_align_stack(x) (x)"
356 * in their asm/exec.h
357 * As this is included in UML from asm-um/system-generic.h,
358 * we can use it to behave as the subarch does.
359 */
360#ifndef arch_align_stack
361unsigned long arch_align_stack(unsigned long sp)
362{
363 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
364 sp -= get_random_int() % 8192;
365 return sp & ~0xf;
366}
367#endif
368
369unsigned long get_wchan(struct task_struct *p)
370{
371 unsigned long stack_page, sp, ip;
372 bool seen_sched = 0;
373
374 if ((p == NULL) || (p == current) || (p->state == TASK_RUNNING))
375 return 0;
376
377 stack_page = (unsigned long) task_stack_page(p);
378 /* Bail if the process has no kernel stack for some reason */
379 if (stack_page == 0)
380 return 0;
381
382 sp = p->thread.switch_buf->JB_SP;
383 /*
384 * Bail if the stack pointer is below the bottom of the kernel
385 * stack for some reason
386 */
387 if (sp < stack_page)
388 return 0;
389
390 while (sp < stack_page + THREAD_SIZE) {
391 ip = *((unsigned long *) sp);
392 if (in_sched_functions(ip))
393 /* Ignore everything until we're above the scheduler */
394 seen_sched = 1;
395 else if (kernel_text_address(ip) && seen_sched)
396 return ip;
397
398 sp += sizeof(unsigned long);
399 }
400
401 return 0;
402}
403
404int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu)
405{
406 int cpu = current_thread_info()->cpu;
407
408 return save_i387_registers(userspace_pid[cpu], (unsigned long *) fpu);
409}
410
1/*
2 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
3 * Copyright 2003 PathScale, Inc.
4 * Licensed under the GPL
5 */
6
7#include <linux/stddef.h>
8#include <linux/err.h>
9#include <linux/hardirq.h>
10#include <linux/mm.h>
11#include <linux/module.h>
12#include <linux/personality.h>
13#include <linux/proc_fs.h>
14#include <linux/ptrace.h>
15#include <linux/random.h>
16#include <linux/slab.h>
17#include <linux/sched.h>
18#include <linux/seq_file.h>
19#include <linux/tick.h>
20#include <linux/threads.h>
21#include <linux/tracehook.h>
22#include <asm/current.h>
23#include <asm/pgtable.h>
24#include <asm/mmu_context.h>
25#include <asm/uaccess.h>
26#include "as-layout.h"
27#include "kern_util.h"
28#include "os.h"
29#include "skas.h"
30
31/*
32 * This is a per-cpu array. A processor only modifies its entry and it only
33 * cares about its entry, so it's OK if another processor is modifying its
34 * entry.
35 */
36struct cpu_task cpu_tasks[NR_CPUS] = { [0 ... NR_CPUS - 1] = { -1, NULL } };
37
38static inline int external_pid(void)
39{
40 /* FIXME: Need to look up userspace_pid by cpu */
41 return userspace_pid[0];
42}
43
44int pid_to_processor_id(int pid)
45{
46 int i;
47
48 for (i = 0; i < ncpus; i++) {
49 if (cpu_tasks[i].pid == pid)
50 return i;
51 }
52 return -1;
53}
54
55void free_stack(unsigned long stack, int order)
56{
57 free_pages(stack, order);
58}
59
60unsigned long alloc_stack(int order, int atomic)
61{
62 unsigned long page;
63 gfp_t flags = GFP_KERNEL;
64
65 if (atomic)
66 flags = GFP_ATOMIC;
67 page = __get_free_pages(flags, order);
68
69 return page;
70}
71
72int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
73{
74 int pid;
75
76 current->thread.request.u.thread.proc = fn;
77 current->thread.request.u.thread.arg = arg;
78 pid = do_fork(CLONE_VM | CLONE_UNTRACED | flags, 0,
79 ¤t->thread.regs, 0, NULL, NULL);
80 return pid;
81}
82EXPORT_SYMBOL(kernel_thread);
83
84static inline void set_current(struct task_struct *task)
85{
86 cpu_tasks[task_thread_info(task)->cpu] = ((struct cpu_task)
87 { external_pid(), task });
88}
89
90extern void arch_switch_to(struct task_struct *to);
91
92void *__switch_to(struct task_struct *from, struct task_struct *to)
93{
94 to->thread.prev_sched = from;
95 set_current(to);
96
97 do {
98 current->thread.saved_task = NULL;
99
100 switch_threads(&from->thread.switch_buf,
101 &to->thread.switch_buf);
102
103 arch_switch_to(current);
104
105 if (current->thread.saved_task)
106 show_regs(&(current->thread.regs));
107 to = current->thread.saved_task;
108 from = current;
109 } while (current->thread.saved_task);
110
111 return current->thread.prev_sched;
112}
113
114void interrupt_end(void)
115{
116 if (need_resched())
117 schedule();
118 if (test_thread_flag(TIF_SIGPENDING))
119 do_signal();
120 if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
121 tracehook_notify_resume(¤t->thread.regs);
122}
123
124void exit_thread(void)
125{
126}
127
128int get_current_pid(void)
129{
130 return task_pid_nr(current);
131}
132
133/*
134 * This is called magically, by its address being stuffed in a jmp_buf
135 * and being longjmp-d to.
136 */
137void new_thread_handler(void)
138{
139 int (*fn)(void *), n;
140 void *arg;
141
142 if (current->thread.prev_sched != NULL)
143 schedule_tail(current->thread.prev_sched);
144 current->thread.prev_sched = NULL;
145
146 fn = current->thread.request.u.thread.proc;
147 arg = current->thread.request.u.thread.arg;
148
149 /*
150 * The return value is 1 if the kernel thread execs a process,
151 * 0 if it just exits
152 */
153 n = run_kernel_thread(fn, arg, ¤t->thread.exec_buf);
154 if (n == 1) {
155 /* Handle any immediate reschedules or signals */
156 interrupt_end();
157 userspace(¤t->thread.regs.regs);
158 }
159 else do_exit(0);
160}
161
162/* Called magically, see new_thread_handler above */
163void fork_handler(void)
164{
165 force_flush_all();
166
167 schedule_tail(current->thread.prev_sched);
168
169 /*
170 * XXX: if interrupt_end() calls schedule, this call to
171 * arch_switch_to isn't needed. We could want to apply this to
172 * improve performance. -bb
173 */
174 arch_switch_to(current);
175
176 current->thread.prev_sched = NULL;
177
178 /* Handle any immediate reschedules or signals */
179 interrupt_end();
180
181 userspace(¤t->thread.regs.regs);
182}
183
184int copy_thread(unsigned long clone_flags, unsigned long sp,
185 unsigned long stack_top, struct task_struct * p,
186 struct pt_regs *regs)
187{
188 void (*handler)(void);
189 int ret = 0;
190
191 p->thread = (struct thread_struct) INIT_THREAD;
192
193 if (current->thread.forking) {
194 memcpy(&p->thread.regs.regs, ®s->regs,
195 sizeof(p->thread.regs.regs));
196 UPT_SET_SYSCALL_RETURN(&p->thread.regs.regs, 0);
197 if (sp != 0)
198 REGS_SP(p->thread.regs.regs.gp) = sp;
199
200 handler = fork_handler;
201
202 arch_copy_thread(¤t->thread.arch, &p->thread.arch);
203 }
204 else {
205 get_safe_registers(p->thread.regs.regs.gp, p->thread.regs.regs.fp);
206 p->thread.request.u.thread = current->thread.request.u.thread;
207 handler = new_thread_handler;
208 }
209
210 new_thread(task_stack_page(p), &p->thread.switch_buf, handler);
211
212 if (current->thread.forking) {
213 clear_flushed_tls(p);
214
215 /*
216 * Set a new TLS for the child thread?
217 */
218 if (clone_flags & CLONE_SETTLS)
219 ret = arch_copy_tls(p);
220 }
221
222 return ret;
223}
224
225void initial_thread_cb(void (*proc)(void *), void *arg)
226{
227 int save_kmalloc_ok = kmalloc_ok;
228
229 kmalloc_ok = 0;
230 initial_thread_cb_skas(proc, arg);
231 kmalloc_ok = save_kmalloc_ok;
232}
233
234void default_idle(void)
235{
236 unsigned long long nsecs;
237
238 while (1) {
239 /* endless idle loop with no priority at all */
240
241 /*
242 * although we are an idle CPU, we do not want to
243 * get into the scheduler unnecessarily.
244 */
245 if (need_resched())
246 schedule();
247
248 tick_nohz_idle_enter();
249 rcu_idle_enter();
250 nsecs = disable_timer();
251 idle_sleep(nsecs);
252 rcu_idle_exit();
253 tick_nohz_idle_exit();
254 }
255}
256
257void cpu_idle(void)
258{
259 cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
260 default_idle();
261}
262
263int __cant_sleep(void) {
264 return in_atomic() || irqs_disabled() || in_interrupt();
265 /* Is in_interrupt() really needed? */
266}
267
268int user_context(unsigned long sp)
269{
270 unsigned long stack;
271
272 stack = sp & (PAGE_MASK << CONFIG_KERNEL_STACK_ORDER);
273 return stack != (unsigned long) current_thread_info();
274}
275
276extern exitcall_t __uml_exitcall_begin, __uml_exitcall_end;
277
278void do_uml_exitcalls(void)
279{
280 exitcall_t *call;
281
282 call = &__uml_exitcall_end;
283 while (--call >= &__uml_exitcall_begin)
284 (*call)();
285}
286
287char *uml_strdup(const char *string)
288{
289 return kstrdup(string, GFP_KERNEL);
290}
291EXPORT_SYMBOL(uml_strdup);
292
293int copy_to_user_proc(void __user *to, void *from, int size)
294{
295 return copy_to_user(to, from, size);
296}
297
298int copy_from_user_proc(void *to, void __user *from, int size)
299{
300 return copy_from_user(to, from, size);
301}
302
303int clear_user_proc(void __user *buf, int size)
304{
305 return clear_user(buf, size);
306}
307
308int strlen_user_proc(char __user *str)
309{
310 return strlen_user(str);
311}
312
313int smp_sigio_handler(void)
314{
315#ifdef CONFIG_SMP
316 int cpu = current_thread_info()->cpu;
317 IPI_handler(cpu);
318 if (cpu != 0)
319 return 1;
320#endif
321 return 0;
322}
323
324int cpu(void)
325{
326 return current_thread_info()->cpu;
327}
328
329static atomic_t using_sysemu = ATOMIC_INIT(0);
330int sysemu_supported;
331
332void set_using_sysemu(int value)
333{
334 if (value > sysemu_supported)
335 return;
336 atomic_set(&using_sysemu, value);
337}
338
339int get_using_sysemu(void)
340{
341 return atomic_read(&using_sysemu);
342}
343
344static int sysemu_proc_show(struct seq_file *m, void *v)
345{
346 seq_printf(m, "%d\n", get_using_sysemu());
347 return 0;
348}
349
350static int sysemu_proc_open(struct inode *inode, struct file *file)
351{
352 return single_open(file, sysemu_proc_show, NULL);
353}
354
355static ssize_t sysemu_proc_write(struct file *file, const char __user *buf,
356 size_t count, loff_t *pos)
357{
358 char tmp[2];
359
360 if (copy_from_user(tmp, buf, 1))
361 return -EFAULT;
362
363 if (tmp[0] >= '0' && tmp[0] <= '2')
364 set_using_sysemu(tmp[0] - '0');
365 /* We use the first char, but pretend to write everything */
366 return count;
367}
368
369static const struct file_operations sysemu_proc_fops = {
370 .owner = THIS_MODULE,
371 .open = sysemu_proc_open,
372 .read = seq_read,
373 .llseek = seq_lseek,
374 .release = single_release,
375 .write = sysemu_proc_write,
376};
377
378int __init make_proc_sysemu(void)
379{
380 struct proc_dir_entry *ent;
381 if (!sysemu_supported)
382 return 0;
383
384 ent = proc_create("sysemu", 0600, NULL, &sysemu_proc_fops);
385
386 if (ent == NULL)
387 {
388 printk(KERN_WARNING "Failed to register /proc/sysemu\n");
389 return 0;
390 }
391
392 return 0;
393}
394
395late_initcall(make_proc_sysemu);
396
397int singlestepping(void * t)
398{
399 struct task_struct *task = t ? t : current;
400
401 if (!(task->ptrace & PT_DTRACE))
402 return 0;
403
404 if (task->thread.singlestep_syscall)
405 return 1;
406
407 return 2;
408}
409
410/*
411 * Only x86 and x86_64 have an arch_align_stack().
412 * All other arches have "#define arch_align_stack(x) (x)"
413 * in their asm/system.h
414 * As this is included in UML from asm-um/system-generic.h,
415 * we can use it to behave as the subarch does.
416 */
417#ifndef arch_align_stack
418unsigned long arch_align_stack(unsigned long sp)
419{
420 if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
421 sp -= get_random_int() % 8192;
422 return sp & ~0xf;
423}
424#endif
425
426unsigned long get_wchan(struct task_struct *p)
427{
428 unsigned long stack_page, sp, ip;
429 bool seen_sched = 0;
430
431 if ((p == NULL) || (p == current) || (p->state == TASK_RUNNING))
432 return 0;
433
434 stack_page = (unsigned long) task_stack_page(p);
435 /* Bail if the process has no kernel stack for some reason */
436 if (stack_page == 0)
437 return 0;
438
439 sp = p->thread.switch_buf->JB_SP;
440 /*
441 * Bail if the stack pointer is below the bottom of the kernel
442 * stack for some reason
443 */
444 if (sp < stack_page)
445 return 0;
446
447 while (sp < stack_page + THREAD_SIZE) {
448 ip = *((unsigned long *) sp);
449 if (in_sched_functions(ip))
450 /* Ignore everything until we're above the scheduler */
451 seen_sched = 1;
452 else if (kernel_text_address(ip) && seen_sched)
453 return ip;
454
455 sp += sizeof(unsigned long);
456 }
457
458 return 0;
459}
460
461int elf_core_copy_fpregs(struct task_struct *t, elf_fpregset_t *fpu)
462{
463 int cpu = current_thread_info()->cpu;
464
465 return save_fp_registers(userspace_pid[cpu], (unsigned long *) fpu);
466}
467