// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * PARISC Architecture-dependent parts of process handling
 * based on the work for i386
 *
 * Copyright (C) 1999-2003 Matthew Wilcox <willy at parisc-linux.org>
 * Copyright (C) 2000 Martin K Petersen <mkp at mkp.net>
 * Copyright (C) 2000 John Marvin <jsm at parisc-linux.org>
 * Copyright (C) 2000 David Huggins-Daines <dhd with pobox.org>
 * Copyright (C) 2000-2003 Paul Bame <bame at parisc-linux.org>
 * Copyright (C) 2000 Philipp Rumpf <prumpf with tux.org>
 * Copyright (C) 2000 David Kennedy <dkennedy with linuxcare.com>
 * Copyright (C) 2000 Richard Hirst <rhirst with parisc-linux.org>
 * Copyright (C) 2000 Grant Grundler <grundler with parisc-linux.org>
 * Copyright (C) 2001 Alan Modra <amodra at parisc-linux.org>
 * Copyright (C) 2001-2002 Ryan Bradetich <rbrad at parisc-linux.org>
 * Copyright (C) 2001-2014 Helge Deller <deller@gmx.de>
 * Copyright (C) 2002 Randolph Chung <tausq with parisc-linux.org>
 */

#include <stdarg.h>

#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/rcupdate.h>
#include <linux/random.h>
#include <linux/nmi.h>

#include <asm/io.h>
#include <asm/asm-offsets.h>
#include <asm/assembly.h>
#include <asm/pdc.h>
#include <asm/pdc_chassis.h>
#include <asm/pgalloc.h>
#include <asm/unwind.h>
#include <asm/sections.h>

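/* COMMAND_GLOBAL is the address of the global (broadcast) command
 * register; writing CMD_RESET to it resets every module on the bus.
 * machine_restart() below uses it as the fallback "regular broadcast
 * reset" when the firmware reset calls do not do the job. */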
#define COMMAND_GLOBAL	F_EXTEND(0xfffe0030)
#define CMD_RESET	5	/* reset any module */

/*
** The Wright Brothers and Gecko systems have a H/W problem
** (Lasi...'nuf said) which may cause a broadcast reset to lock up
** the system. An HVERSION dependent PDC call was developed
** to perform a "safe", platform specific broadcast reset instead
** of kludging up all the code.
**
** Older machines which do not implement PDC_BROADCAST_RESET will
** return (with an error) and the regular broadcast reset can be
** issued. Obviously, if the PDC does implement PDC_BROADCAST_RESET
** the PDC call will not return (the system will be reset).
*/
void machine_restart(char *cmd)
{
#ifdef FASTBOOT_SELFTEST_SUPPORT
	/*
	** If the user has modified the Firmware Selftest Bitmap,
	** run the tests specified in the bitmap after the
	** system is rebooted w/PDC_DO_RESET.
	**
	** ftc_bitmap = 0x1AUL "Skip destructive memory tests"
	**
	** Using "directed resets" at each processor with the MEM_TOC
	** vector cleared will also avoid running destructive
	** memory self tests. (Not implemented yet)
	*/
	if (ftc_bitmap) {
		pdc_do_firm_test_reset(ftc_bitmap);
	}
#endif
	/* set up a new LED state on systems shipped with an LED State panel */
	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_SHUTDOWN);

	/* "Normal" system reset */
	pdc_do_reset();

	/* Nope...box should reset with just CMD_RESET now */
	gsc_writel(CMD_RESET, COMMAND_GLOBAL);

	/* Wait for RESET to lay us to rest. */
	while (1) ;

}

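/* Optional power-off hook; if a platform driver registers one,
 * machine_power_off() calls it first. */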
void (*chassis_power_off)(void);

/*
 * This routine is called from sys_reboot to actually turn off the
 * machine
 */
void machine_power_off(void)
{
	/* If there is a registered power off handler, call it. */
	if (chassis_power_off)
		chassis_power_off();

	/* Put the soft power button back under hardware control.
	 * If the user had already pressed the power button, the
	 * following call will immediately power off. */
	pdc_soft_power_button(0);

	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_SHUTDOWN);

	/* ipmi_poweroff may have been installed. */
	if (pm_power_off)
		pm_power_off();

	/* It seems we have no way to power the system off via
	 * software. The user has to press the button himself. */

	printk(KERN_EMERG "System shut down completed.\n"
	       "Please power this system off now.");

	/* prevent soft lockup/stalled CPU messages for endless loop. */
	rcu_sysrq_start();
	lockup_detector_soft_poweroff();
	for (;;);
}

void (*pm_power_off)(void);
EXPORT_SYMBOL(pm_power_off);

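/* There is no separate "halt" path on PA-RISC; halting simply powers
 * the machine off. */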
void machine_halt(void)
{
	machine_power_off();
}

void flush_thread(void)
{
	/* Only needs to handle fpu stuff or perf monitors.
	** REVISIT: several arches implement a "lazy fpu state".
	*/
}

void release_thread(struct task_struct *dead_task)
{
}

/*
 * Fill in the FPU structure for a core dump.
 */

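/* A non-zero return value tells the ELF core dumper that the FP
 * register state is valid and should be written to the dump. */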
int dump_fpu (struct pt_regs * regs, elf_fpregset_t *r)
{
	if (regs == NULL)
		return 0;

	memcpy(r, regs->fr, sizeof *r);
	return 1;
}

int dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *r)
{
	memcpy(r, tsk->thread.regs.fr, sizeof(*r));
	return 1;
}

/*
 * Idle thread support
 *
 * Detect when running on QEMU with SeaBIOS PDC Firmware and let
 * QEMU idle the host too.
 */

int running_on_qemu __ro_after_init;
EXPORT_SYMBOL(running_on_qemu);

void __cpuidle arch_cpu_idle_dead(void)
{
	/* nop on real hardware, qemu will offline CPU. */
	asm volatile("or %%r31,%%r31,%%r31\n":::);
}

void __cpuidle arch_cpu_idle(void)
{
	local_irq_enable();

	/* nop on real hardware, qemu will idle sleep. */
	asm volatile("or %%r10,%%r10,%%r10\n":::);
}

static int __init parisc_idle_init(void)
{
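	/* The sleep/offline hints above only help under QEMU; on real
	 * hardware fall back to the generic polling idle loop. */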
	if (!running_on_qemu)
		cpu_idle_poll_ctrl(1);

	return 0;
}
arch_initcall(parisc_idle_init);

/*
 * Copy architecture-specific thread state
 */
int
copy_thread(unsigned long clone_flags, unsigned long usp,
	    unsigned long kthread_arg, struct task_struct *p)
{
	struct pt_regs *cregs = &(p->thread.regs);
	void *stack = task_stack_page(p);

	/* We have to use void * instead of a function pointer, because
	 * a function pointer isn't a pointer to the function itself on
	 * 64-bit.  Make them const so the compiler knows they live in .text */
	extern void * const ret_from_kernel_thread;
	extern void * const child_return;

	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		memset(cregs, 0, sizeof(struct pt_regs));
		if (!usp) /* idle thread */
			return 0;
		/* Must exit via ret_from_kernel_thread in order
		 * to call schedule_tail()
		 */
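		/* ksp/kpc are what the context-switch code loads for the
		 * new task: it resumes at kpc on the kernel stack ksp. */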
		cregs->ksp = (unsigned long)stack + THREAD_SZ_ALGN + FRAME_SIZE;
		cregs->kpc = (unsigned long) &ret_from_kernel_thread;
		/*
		 * Copy function and argument to be called from
		 * ret_from_kernel_thread.
		 */
#ifdef CONFIG_64BIT
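		/* On 64-bit, usp points at a function descriptor
		 * (Elf64_Fdesc): word 2 is the entry address (%r26),
		 * word 3 the global pointer (%r27). */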
		cregs->gr[27] = ((unsigned long *)usp)[3];
		cregs->gr[26] = ((unsigned long *)usp)[2];
#else
		cregs->gr[26] = usp;
#endif
		cregs->gr[25] = kthread_arg;
	} else {
		/* user thread */
		/* usp must be word aligned. This also prevents users from
		 * passing in the value 1 (which is the signal for a special
		 * return for a kernel thread) */
		if (usp) {
			usp = ALIGN(usp, 4);
			if (likely(usp))
				cregs->gr[30] = usp;
		}
		cregs->ksp = (unsigned long)stack + THREAD_SZ_ALGN + FRAME_SIZE;
		cregs->kpc = (unsigned long) &child_return;

		/* Setup thread TLS area from the 4th parameter in clone */
		if (clone_flags & CLONE_SETTLS)
			cregs->cr27 = cregs->gr[23];
	}

	return 0;
}

unsigned long
get_wchan(struct task_struct *p)
{
	struct unwind_frame_info info;
	unsigned long ip;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	/*
	 * Unwind the blocked task's kernel stack until we step out of
	 * the scheduler functions; that IP is where the task is sleeping.
	 */

	unwind_frame_init_from_blocked_task(&info, p);
	do {
		if (unwind_once(&info) < 0)
			return 0;
		ip = info.ip;
		if (!in_sched_functions(ip))
			return ip;
	} while (count++ < MAX_UNWIND_ENTRIES);
	return 0;
}

#ifdef CONFIG_64BIT
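/* On wide (64-bit) kernels a function pointer points at an Elf64_Fdesc
 * rather than at the code itself; return the entry address stored in
 * the descriptor, or the original pointer if it cannot be read. */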
void *dereference_function_descriptor(void *ptr)
{
	Elf64_Fdesc *desc = ptr;
	void *p;

	if (!probe_kernel_address(&desc->addr, p))
		ptr = p;
	return ptr;
}

void *dereference_kernel_function_descriptor(void *ptr)
{
	if (ptr < (void *)__start_opd ||
			ptr >= (void *)__end_opd)
		return ptr;

	return dereference_function_descriptor(ptr);
}
#endif

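/* brk (heap start) randomization: pick a random, page-granular offset
 * bounded by BRK_RND_MASK pages. */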
static inline unsigned long brk_rnd(void)
{
	return (get_random_int() & BRK_RND_MASK) << PAGE_SHIFT;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());

	if (ret < mm->brk)
		return mm->brk;
	return ret;
}