/* This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2007 by Helge Deller <deller@gmx.de>
 * Copyright 1999 SuSE GmbH (Philipp Rumpf)
 * Copyright 1999 Philipp Rumpf (prumpf@tux.org)
 * Copyright 2000 Hewlett Packard (Paul Bame, bame@puffin.external.hp.com)
 * Copyright (C) 2001 Grant Grundler (Hewlett Packard)
 * Copyright (C) 2004 Kyle McMartin <kyle@debian.org>
 *
 * Initial Version 04-23-1999 by Helge Deller <deller@gmx.de>
 */

#include <asm/asm-offsets.h>
#include <asm/psw.h>
#include <asm/pdc.h>

#include <asm/assembly.h>

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>

	.level	1.1

	__INITDATA
ENTRY(boot_args)
	.word 0			/* arg0 */
	.word 0			/* arg1 */
	.word 0			/* arg2 */
	.word 0			/* arg3 */
END(boot_args)

	__HEAD

	.align	4
	.import init_task,data
	.import init_stack,data
	.import fault_vector_20,code    /* IVA parisc 2.0 32 bit */
#ifndef CONFIG_64BIT
	.import fault_vector_11,code    /* IVA parisc 1.1 32 bit */
	.import	$global$		/* forward declaration */
#endif /*!CONFIG_64BIT*/
ENTRY(parisc_kernel_start)
	.proc
	.callinfo

	/* Make sure sr4-sr7 are set to zero for the kernel address space */
	mtsp	%r0,%sr4
	mtsp	%r0,%sr5
	mtsp	%r0,%sr6
	mtsp	%r0,%sr7

	/* Clear BSS (shouldn't the boot loader do this?) */

	.import __bss_start,data
	.import __bss_stop,data

	load32		PA(__bss_start),%r3
	load32		PA(__bss_stop),%r4
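	/* Word-at-a-time clear: while %r3 is below __bss_stop the backward
	 * branch is taken and the delay-slot store zeroes a word and
	 * post-increments %r3; the ",n" completer nullifies that store once
	 * the branch falls through, so nothing is written past __bss_stop. */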
$bss_loop:
	cmpb,<<,n	%r3,%r4,$bss_loop
	stw,ma		%r0,4(%r3)

	/* Save away the arguments the boot loader passed in (32 bit args) */
	load32		PA(boot_args),%r1
	stw,ma		%arg0,4(%r1)
	stw,ma		%arg1,4(%r1)
	stw,ma		%arg2,4(%r1)
	stw,ma		%arg3,4(%r1)

#if !defined(CONFIG_64BIT) && defined(CONFIG_PA20)
	/* This 32-bit kernel was compiled for PA2.0 CPUs. Check current CPU
	 * and halt kernel if we detect a PA1.x CPU. */
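	/* PA2.0 probe: %cr11 is the shift amount register (SAR), which is
	 * 5 bits wide on PA1.x but 6 bits wide on PA2.0, so the value 32
	 * written below reads back non-zero only on a PA2.0 CPU. */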
	ldi		32,%r10
	mtctl		%r10,%cr11
	.level 2.0
	mfctl,w		%cr11,%r10
	.level 1.1
	comib,<>,n	0,%r10,$cpu_ok

	load32		PA(msg1),%arg0
	ldi		msg1_end-msg1,%arg1
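	/* The panic path below calls the console's IODC entry point found
	 * via MEM_CONS (the Page Zero console device record): its HPA, SPA,
	 * layers and IODC entry are loaded from that record, the first four
	 * arguments go in %arg0-%arg3 and the remaining ones in the 32-bit
	 * calling convention's stack argument slots below %sp. */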
$iodc_panic:
	copy		%arg0, %r10
	copy		%arg1, %r11
	load32		PA(init_stack),%sp
#define MEM_CONS 0x3A0
	ldw		MEM_CONS+32(%r0),%arg0	// HPA
	ldi		ENTRY_IO_COUT,%arg1
	ldw		MEM_CONS+36(%r0),%arg2	// SPA
	ldw		MEM_CONS+8(%r0),%arg3	// layers
	load32		PA(__bss_start),%r1
	stw		%r1,-52(%sp)		// arg4
	stw		%r0,-56(%sp)		// arg5
	stw		%r10,-60(%sp)		// arg6 = ptr to text
	stw		%r11,-64(%sp)		// arg7 = len
	stw		%r0,-68(%sp)		// arg8
	load32		PA(.iodc_panic_ret), %rp
	ldw		MEM_CONS+40(%r0),%r1	// ENTRY_IODC
	bv,n		(%r1)
.iodc_panic_ret:
	b .				/* wait endlessly with ... */
	or		%r10,%r10,%r10	/* qemu idle sleep */
msg1:	.ascii "Can't boot kernel which was built for PA8x00 CPUs on this machine.\r\n"
msg1_end:

$cpu_ok:
#endif

	.level	PA_ASM_LEVEL

	/* Initialize startup VM. Just map first 16/32 MB of memory */
	load32		PA(swapper_pg_dir),%r4
	mtctl		%r4,%cr24	/* Initialize kernel root pointer */
	mtctl		%r4,%cr25	/* Initialize user root pointer */

#if CONFIG_PGTABLE_LEVELS == 3
	/* Set pmd in pgd */
	load32		PA(pmd0),%r5
	shrd		%r5,PxD_VALUE_SHIFT,%r3
	ldo		(PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3
	stw		%r3,ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4)
	ldo		ASM_PMD_ENTRY*ASM_PMD_ENTRY_SIZE(%r5),%r4
#else
	/* 2-level page table, so pmd == pgd */
	ldo		ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4),%r4
#endif
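	/* %r4 now points at the page-directory slot where the kernel's
	 * initial mapping starts (ASM_PGD_ENTRY/ASM_PMD_ENTRY select the
	 * entry covering the kernel's virtual base); the pte pages wired up
	 * below map that range onto physical address 0. */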

	/* Fill in pmd with enough pte directories */
	load32		PA(pg0),%r1
	SHRREG		%r1,PxD_VALUE_SHIFT,%r3
	ldo		(PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3

	ldi		ASM_PT_INITIAL,%r1

1:
	stw		%r3,0(%r4)
	ldo		(PAGE_SIZE >> PxD_VALUE_SHIFT)(%r3),%r3
	addib,>		-1,%r1,1b
#if CONFIG_PGTABLE_LEVELS == 3
	ldo		ASM_PMD_ENTRY_SIZE(%r4),%r4
#else
	ldo		ASM_PGD_ENTRY_SIZE(%r4),%r4
#endif


	/* Now initialize the PTEs themselves.  We use RWX for
	 * everything ... it will get remapped correctly later */
	ldo		0+_PAGE_KERNEL_RWX(%r0),%r3 /* Hardwired 0 phys addr start */
	load32		(1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */
	load32		PA(pg0),%r1

$pgt_fill_loop:
	STREGM		%r3,ASM_PTE_ENTRY_SIZE(%r1)
	ldo		(1<<PFN_PTE_SHIFT)(%r3),%r3 /* add one PFN */
	addib,>		-1,%r11,$pgt_fill_loop
	nop

	/* Load the return address...er...crash 'n burn */
	copy		%r0,%r2

	/* And the RFI Target address too */
	load32		start_parisc,%r11

	/* And the initial task pointer */
	load32		init_task,%r6
	mtctl		%r6,%cr30

	/* And the stack pointer too */
	load32		init_stack,%sp
	tophys_r1	%sp
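	/* Translation is still off at this point, so the stack pointer has
	 * to be a physical address; it is switched back to its virtual
	 * address (tovirt_r1) right before the rfi into virtual mode. */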
#if defined(CONFIG_64BIT) && defined(CONFIG_FUNCTION_TRACER)
	.import _mcount,data
	/* initialize mcount FPTR */
	/* Get the global data pointer */
	loadgp
	load32		PA(_mcount), %r10
	std		%dp,0x18(%r10)
#endif

#ifdef CONFIG_64BIT
	/* Get PDCE_PROC for monarch CPU. */
#define MEM_PDC_LO 0x388
#define MEM_PDC_HI 0x35C
	ldw		MEM_PDC_LO(%r0),%r3
	ldw		MEM_PDC_HI(%r0),%r10
	depd		%r10, 31, 32, %r3	/* move to upper word */
#endif
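	/* Page Zero stores the 64-bit PDC entry point as two 32-bit words
	 * (MEM_PDC_LO/MEM_PDC_HI); the depd above merges the high word into
	 * the upper half of %r3 to rebuild the full firmware call address. */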


#ifdef CONFIG_SMP
	/* Set the smp rendezvous address into page zero.
	** It would be safer to do this in init_smp_config() but
	** it's just way easier to deal with here, because of the
	** 64-bit function ptrs and because the address is local to this file.
	*/
	load32		PA(smp_slave_stext),%r10
	stw		%r10,0x10(%r0)	/* MEM_RENDEZ */
	stw		%r0,0x28(%r0)	/* MEM_RENDEZ_HI - assume addr < 4GB */

	/* FALLTHROUGH */
	.procend

#ifdef CONFIG_HOTPLUG_CPU
	/* common_stext is far away in another section... jump there */
	load32		PA(common_stext), %rp
	bv,n		(%rp)

	/* common_stext and smp_slave_stext need to be in the text section */
	.text
#endif
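	/* With CONFIG_HOTPLUG_CPU, CPUs brought up after boot still enter
	 * through smp_slave_stext/common_stext, so that code must live in
	 * .text rather than in init memory that is freed after boot. */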

	/*
	** Code Common to both Monarch and Slave processors.
	** Entry:
	**
	**  1.1:
	**	%r11 must contain RFI target address.
	**	%r25/%r26 args to pass to target function
	**	%r2 in case rfi target decides it didn't like something
	**
	**  2.0w:
	**	%r3  PDCE_PROC address
	**	%r11 RFI target address
	**
	** Caller must init: SR4-7, %sp, %r10, %cr24/25
	*/
common_stext:
	.proc
	.callinfo
#else
	/* Clear PDC entry point - we won't use it */
	stw		%r0,0x10(%r0)	/* MEM_RENDEZ */
	stw		%r0,0x28(%r0)	/* MEM_RENDEZ_HI */
#endif /*CONFIG_SMP*/

#ifdef CONFIG_64BIT
	mfctl		%cr30,%r6	/* PCX-W2 firmware bug */
	tophys_r1	%r6

	/* Save the rfi target address */
	STREG		%r11, TASK_PT_GR11(%r6)
	/* Switch to wide mode; Superdome doesn't support narrow PDC
	** calls.
	*/
1:	mfia		%rp		/* clear upper part of pcoq */
	ldo		2f-1b(%rp),%rp
	depdi		0,31,32,%rp
	bv		(%rp)
	ssm		PSW_SM_W,%r0

	/* Set Wide mode as the "Default" (e.g. for traps)
	** First trap occurs *right* after (or part of) rfi for slave CPUs.
	** Someday, palo might not do this for the Monarch either.
	*/
2:

	ldo		PDC_PSW(%r0),%arg0		/* 21 */
	ldo		PDC_PSW_SET_DEFAULTS(%r0),%arg1	/* 2 */
	ldo		PDC_PSW_WIDE_BIT(%r0),%arg2	/* 2 */
	load32		PA(stext_pdc_ret), %rp
	bv		(%r3)
	copy		%r0,%arg3

stext_pdc_ret:
	LDREG		TASK_PT_GR11(%r6), %r11
	tovirt_r1	%r6
	mtctl		%r6,%cr30		/* restore task thread info */
#endif

	/* PARANOID: clear user scratch/user space SR's */
	mtsp	%r0,%sr0
	mtsp	%r0,%sr1
	mtsp	%r0,%sr2
	mtsp	%r0,%sr3

	/* Initialize Protection Registers */
	mtctl	%r0,%cr8
	mtctl	%r0,%cr9
	mtctl	%r0,%cr12
	mtctl	%r0,%cr13

	/* Initialize the global data pointer */
	loadgp

	/* Set up our interrupt table.  HPMCs might not work after this!
	 *
	 * We need to install the correct iva for PA1.1 or PA2.0.  The
	 * following short sequence of instructions can determine this
	 * (without being illegal on a PA1.1 machine).
	 */
#ifndef CONFIG_64BIT
	ldi		32,%r10
	mtctl		%r10,%cr11
	.level 2.0
	mfctl,w		%cr11,%r10
	.level 1.1
	comib,<>,n	0,%r10,$is_pa20
	ldil		L%PA(fault_vector_11),%r10
	b		$install_iva
	ldo		R%PA(fault_vector_11)(%r10),%r10

$is_pa20:
	.level		PA_ASM_LEVEL /* restore 1.1 || 2.0w */
#endif /*!CONFIG_64BIT*/
	load32		PA(fault_vector_20),%r10

$install_iva:
	mtctl		%r10,%cr14

	b		aligned_rfi  /* Prepare to RFI! Man all the cannons! */
	nop

	.align 128
aligned_rfi:
	pcxt_ssm_bug

	copy		%r3, %arg0	/* PDCE_PROC for smp_callin() */

	rsm		PSW_SM_QUIET,%r0	/* turn off troublesome PSW bits */
	/* Don't need NOPs, have 8 compliant insn before rfi */

	mtctl		%r0,%cr17	/* Clear IIASQ tail */
	mtctl		%r0,%cr17	/* Clear IIASQ head */

	/* Load RFI target into PC queue */
	mtctl		%r11,%cr18	/* IIAOQ head */
	ldo		4(%r11),%r11
	mtctl		%r11,%cr18	/* IIAOQ tail */

	load32		KERNEL_PSW,%r10
	mtctl		%r10,%ipsw
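	/* rfi reloads the PSW from %ipsw and the PC queues from the
	 * IIASQ/IIAOQ values set above, so execution resumes at the RFI
	 * target in %r11 with KERNEL_PSW in effect, i.e. in virtual mode. */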

	tovirt_r1	%sp

	/* Jump through hyperspace to Virt Mode */
	rfi
	nop

	.procend

#ifdef CONFIG_SMP

	.import smp_init_current_idle_task,data
	.import	smp_callin,code

#ifndef CONFIG_64BIT
smp_callin_rtn:
	.proc
	.callinfo
	break	1,1	/* Break if returned from start_secondary */
	nop
	nop
	.procend
#endif /*!CONFIG_64BIT*/

/***************************************************************************
* smp_slave_stext is executed by all non-monarch processors when the Monarch
* pokes the slave CPUs in smp.c:smp_boot_cpus().
*
* Once here, register values are initialized in order to branch to virtual
* mode.  Once all available/eligible CPUs are in virtual mode, all are
* released and start out by executing their own idle task.
*****************************************************************************/
smp_slave_stext:
	.proc
	.callinfo

	/*
	** Initialize Space registers
	*/
	mtsp	%r0,%sr4
	mtsp	%r0,%sr5
	mtsp	%r0,%sr6
	mtsp	%r0,%sr7

#ifdef CONFIG_64BIT
	/*
	 * Enable Wide mode early, in case the task_struct for the idle
	 * task in smp_init_current_idle_task was allocated above 4GB.
	 */
1:	mfia		%rp		/* clear upper part of pcoq */
	ldo		2f-1b(%rp),%rp
	depdi		0,31,32,%rp
	bv		(%rp)
	ssm		PSW_SM_W,%r0
2:
#endif

	/* Initialize the SP - monarch sets up smp_init_current_idle_task */
	load32		PA(smp_init_current_idle_task),%r6
	LDREG		0(%r6),%r6
	mtctl		%r6,%cr30
	tophys_r1	%r6
	LDREG		TASK_STACK(%r6),%sp
	tophys_r1	%sp
	ldo		FRAME_SIZE(%sp),%sp

	/* point CPU to kernel page tables */
	load32		PA(swapper_pg_dir),%r4
	mtctl		%r4,%cr24	/* Initialize kernel root pointer */
	mtctl		%r4,%cr25	/* Initialize user root pointer */

#ifdef CONFIG_64BIT
	/* Setup PDCE_PROC entry */
	copy		%arg0,%r3
#else
	/* Load RFI *return* address in case smp_callin bails */
	load32		smp_callin_rtn,%r2
#endif

	/* Load RFI target address. */
	load32		smp_callin,%r11

	/* ok...common code can handle the rest */
	b		common_stext
	nop

	.procend
#endif /* CONFIG_SMP */

#ifndef CONFIG_64BIT
	.section .data..ro_after_init

	.align	4
	.export	$global$,data

	.type	$global$,@object
	.size	$global$,4
$global$:
	.word 0
#endif /*!CONFIG_64BIT*/