/* This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2007 by Helge Deller <deller@gmx.de>
 * Copyright 1999 SuSE GmbH (Philipp Rumpf)
 * Copyright 1999 Philipp Rumpf (prumpf@tux.org)
 * Copyright 2000 Hewlett Packard (Paul Bame, bame@puffin.external.hp.com)
 * Copyright (C) 2001 Grant Grundler (Hewlett Packard)
 * Copyright (C) 2004 Kyle McMartin <kyle@debian.org>
 *
 * Initial Version 04-23-1999 by Helge Deller <deller@gmx.de>
 */

#include <asm/asm-offsets.h>
#include <asm/psw.h>
#include <asm/pdc.h>

#include <asm/assembly.h>

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>

        .level  1.1

        __INITDATA
ENTRY(boot_args)
        .word 0                 /* arg0 */
        .word 0                 /* arg1 */
        .word 0                 /* arg2 */
        .word 0                 /* arg3 */
END(boot_args)

        __HEAD

        .align  4
        .import init_task,data
        .import init_stack,data
        .import fault_vector_20,code    /* IVA parisc 2.0 32 bit */
#ifndef CONFIG_64BIT
        .import fault_vector_11,code    /* IVA parisc 1.1 32 bit */
        .import $global$                /* forward declaration */
#endif /*!CONFIG_64BIT*/
ENTRY(parisc_kernel_start)
        .proc
        .callinfo

        /* Make sure sr4-sr7 are set to zero for the kernel address space */
        mtsp    %r0,%sr4
        mtsp    %r0,%sr5
        mtsp    %r0,%sr6
        mtsp    %r0,%sr7

        /* Clear BSS (shouldn't the boot loader do this?) */

        .import __bss_start,data
        .import __bss_stop,data

        load32          PA(__bss_start),%r3
        load32          PA(__bss_stop),%r4
$bss_loop:
        cmpb,<<,n       %r3,%r4,$bss_loop
        stw,ma          %r0,4(%r3)

        /* Save away the arguments the boot loader passed in (32 bit args) */
        load32          PA(boot_args),%r1
        stw,ma          %arg0,4(%r1)
        stw,ma          %arg1,4(%r1)
        stw,ma          %arg2,4(%r1)
        stw,ma          %arg3,4(%r1)

#if defined(CONFIG_PA20)
        /* check for 64-bit capable CPU as required by current kernel */
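        /*
         * Probe the CPU width via the Shift Amount Register (%cr11):
         * the SAR is 5 bits wide on PA1.1 and 6 bits wide on PA2.0, so
         * writing 32 and reading it back yields 0 on a PA1.1 machine
         * and nonzero on a PA2.0 machine.
         */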
        ldi             32,%r10
        mtctl           %r10,%cr11
        .level 2.0
        mfctl,w         %cr11,%r10
        .level 1.1
        comib,<>,n      0,%r10,$cpu_ok

        load32          PA(msg1),%arg0
        ldi             msg1_end-msg1,%arg1
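        /*
         * No kernel console exists this early, so the message is printed
         * through firmware: the boot console's HPA and IODC entry point
         * are taken from its Page Zero device descriptor (MEM_CONS below).
         */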
$iodc_panic:
        copy            %arg0, %r10
        copy            %arg1, %r11
        load32          PA(init_stack),%sp
#define MEM_CONS 0x3A0
        ldw             MEM_CONS+32(%r0),%arg0  // HPA
        ldi             ENTRY_IO_COUT,%arg1
        ldw             MEM_CONS+36(%r0),%arg2  // SPA
        ldw             MEM_CONS+8(%r0),%arg3   // layers
        load32          PA(__bss_start),%r1
        stw             %r1,-52(%sp)            // arg4
        stw             %r0,-56(%sp)            // arg5
        stw             %r10,-60(%sp)           // arg6 = ptr to text
        stw             %r11,-64(%sp)           // arg7 = len
        stw             %r0,-68(%sp)            // arg8
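        /* The stores just above pass arguments five and up on the stack,
         * as the 32-bit PA-RISC calling convention requires; the IODC
         * call takes more than four arguments. */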
        load32          PA(.iodc_panic_ret), %rp
        ldw             MEM_CONS+40(%r0),%r1    // ENTRY_IODC
        bv,n            (%r1)
.iodc_panic_ret:
        b .                             /* wait endlessly with ... */
        or              %r10,%r10,%r10  /* qemu idle sleep */
msg1:   .ascii "Can't boot a kernel built for PA8x00 CPUs on this machine.\r\n"
msg1_end:

$cpu_ok:
#endif

        .level  PA_ASM_LEVEL

        /* Initialize startup VM.  Just map first 16/32 MB of memory */
        load32          PA(swapper_pg_dir),%r4
        mtctl           %r4,%cr24       /* Initialize kernel root pointer */
        mtctl           %r4,%cr25       /* Initialize user root pointer */
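        /* There is no user address space yet, so both root pointers start
         * out referencing swapper_pg_dir; all early translations resolve
         * through the kernel page table. */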

#if CONFIG_PGTABLE_LEVELS == 3
        /* Set pmd in pgd */
        load32          PA(pmd0),%r5
        shrd            %r5,PxD_VALUE_SHIFT,%r3
        ldo             (PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3
        stw             %r3,ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4)
        ldo             ASM_PMD_ENTRY*ASM_PMD_ENTRY_SIZE(%r5),%r4
#else
        /* 2-level page table, so pmd == pgd */
        ldo             ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4),%r4
#endif

        /* Fill in pmd with enough pte directories */
        load32          PA(pg0),%r1
        SHRREG          %r1,PxD_VALUE_SHIFT,%r3
        ldo             (PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3

        ldi             ASM_PT_INITIAL,%r1

1:
        stw             %r3,0(%r4)
        ldo             (PAGE_SIZE >> PxD_VALUE_SHIFT)(%r3),%r3
        addib,>         -1,%r1,1b
#if CONFIG_PGTABLE_LEVELS == 3
        ldo             ASM_PMD_ENTRY_SIZE(%r4),%r4
#else
        ldo             ASM_PGD_ENTRY_SIZE(%r4),%r4
#endif
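        /* In the loop above, each iteration stores one entry pointing at
         * the next page of pg0; the ldo in the delay slot of the addib
         * advances to the next pmd/pgd slot. */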


        /* Now initialize the PTEs themselves.  We use RWX for
         * everything ... it will get remapped correctly later */
        ldo             0+_PAGE_KERNEL_RWX(%r0),%r3 /* Hardwired 0 phys addr start */
        load32          (1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */
        load32          PA(pg0),%r1

$pgt_fill_loop:
        STREGM          %r3,ASM_PTE_ENTRY_SIZE(%r1)
        ldo             (1<<PFN_PTE_SHIFT)(%r3),%r3 /* add one PFN */
        addib,>         -1,%r11,$pgt_fill_loop
        nop

        /* Load the return address...er...crash 'n burn */
        copy            %r0,%r2

        /* And the RFI Target address too */
        load32          start_parisc,%r11

        /* And the initial task pointer */
        load32          init_task,%r6
        mtctl           %r6,%cr30

        /* And the stack pointer too */
        load32          init_stack,%sp
        tophys_r1       %sp
#if defined(CONFIG_64BIT) && defined(CONFIG_FUNCTION_TRACER)
        .import _mcount,data
        /* initialize mcount FPTR */
        /* Get the global data pointer */
        loadgp
        load32          PA(_mcount), %r10
        std             %dp,0x18(%r10)
#endif

#define MEM_PDC_LO 0x388
#define MEM_PDC_HI 0x35C
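/* Page Zero keeps the firmware PDC entry point split across two 32-bit
 * words at the offsets above; the high word matters only for wide-mode
 * (64-bit) PDC calls. */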
#ifdef CONFIG_64BIT
        /* Get PDCE_PROC for monarch CPU. */
        ldw             MEM_PDC_LO(%r0),%r3
        ldw             MEM_PDC_HI(%r0),%r10
        depd            %r10, 31, 32, %r3       /* move to upper word */
#endif


#ifdef CONFIG_SMP
        /* Set the smp rendezvous address into page zero.
        ** It would be safer to do this in init_smp_config() but
        ** it's much easier to handle here because of 64-bit
        ** function pointers, and the address is local to this file.
        */
        load32          PA(smp_slave_stext),%r10
        stw             %r10,0x10(%r0)  /* MEM_RENDEZ */
        stw             %r0,0x28(%r0)   /* MEM_RENDEZ_HI - assume addr < 4GB */
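        /* Slave CPUs wait in firmware until released by the monarch
         * (see smp.c); they then enter the kernel in physical mode at
         * the rendezvous address stored here. */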

        /* FALLTHROUGH */
        .procend

#ifdef CONFIG_HOTPLUG_CPU
        /* common_stext is far away in another section... jump there */
        load32          PA(common_stext), %rp
        bv,n            (%rp)

        /* common_stext and smp_slave_stext need to be in the text section */
        .text
#endif

        /*
        ** Code Common to both Monarch and Slave processors.
        ** Entry:
        **
        **  1.1:
        **      %r11 must contain RFI target address.
        **      %r25/%r26 args to pass to target function
        **      %r2  in case rfi target decides it didn't like something
        **
        **  2.0w:
        **      %r3  PDCE_PROC address
        **      %r11 RFI target address
        **
        ** Caller must init: SR4-7, %sp, %r10, %cr24/25,
        */
common_stext:
        .proc
        .callinfo
#else
        /* Clear PDC entry point - we won't use it */
        stw             %r0,0x10(%r0)   /* MEM_RENDEZ */
        stw             %r0,0x28(%r0)   /* MEM_RENDEZ_HI */
#endif /*CONFIG_SMP*/

#ifdef CONFIG_64BIT
        mfctl           %cr30,%r6       /* PCX-W2 firmware bug */
        tophys_r1       %r6

        /* Save the rfi target address */
        STREG           %r11, TASK_PT_GR11(%r6)
        /* Switch to wide mode; Superdome doesn't support narrow PDC
        ** calls.
        */
1:      mfia            %rp             /* clear upper part of pcoq */
        ldo             2f-1b(%rp),%rp
        depdi           0,31,32,%rp
        bv              (%rp)
        ssm             PSW_SM_W,%r0
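        /* mfia picks up the current instruction address, depdi clears its
         * upper 32 bits, and the bv lands on label 2 with the delay-slot
         * ssm having set PSW W, i.e. 64-bit address formation enabled. */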

        /* Set Wide mode as the "Default" (e.g. for traps)
        ** First trap occurs *right* after (or part of) rfi for slave CPUs.
        ** Someday, palo might not do this for the Monarch either.
        */
2:

        ldo             PDC_PSW(%r0),%arg0              /* 21 */
        ldo             PDC_PSW_SET_DEFAULTS(%r0),%arg1 /* 2 */
        ldo             PDC_PSW_WIDE_BIT(%r0),%arg2     /* 2 */
        load32          PA(stext_pdc_ret), %rp
        bv              (%r3)
        copy            %r0,%arg3

stext_pdc_ret:
        LDREG           TASK_PT_GR11(%r6), %r11
        tovirt_r1       %r6
        mtctl           %r6,%cr30       /* restore task thread info */
#endif

#ifndef CONFIG_64BIT
        /* clear all BTLBs */
        ldi             PDC_BLOCK_TLB,%arg0
        load32          PA(stext_pdc_btlb_ret), %rp
        ldw             MEM_PDC_LO(%r0),%r3
        bv              (%r3)
        ldi             PDC_BTLB_PURGE_ALL,%arg1
stext_pdc_btlb_ret:
#endif

        /* PARANOID: clear user scratch/user space SR's */
        mtsp    %r0,%sr0
        mtsp    %r0,%sr1
        mtsp    %r0,%sr2
        mtsp    %r0,%sr3

        /* Initialize Protection Registers */
        mtctl   %r0,%cr8
        mtctl   %r0,%cr9
        mtctl   %r0,%cr12
        mtctl   %r0,%cr13

        /* Initialize the global data pointer */
        loadgp

        /* Set up our interrupt table.  HPMCs might not work after this!
         *
         * We need to install the correct iva for PA1.1 or PA2.0. The
         * following short sequence of instructions can determine this
         * (without being illegal on a PA1.1 machine).
         */
#ifndef CONFIG_64BIT
        ldi             32,%r10
        mtctl           %r10,%cr11
        .level 2.0
        mfctl,w         %cr11,%r10
        .level 1.1
        comib,<>,n      0,%r10,$is_pa20
        ldil            L%PA(fault_vector_11),%r10
        b               $install_iva
        ldo             R%PA(fault_vector_11)(%r10),%r10

$is_pa20:
        .level          PA_ASM_LEVEL /* restore 1.1 || 2.0w */
#endif /*!CONFIG_64BIT*/
        load32          PA(fault_vector_20),%r10

$install_iva:
        mtctl           %r10,%cr14
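        /* %cr14 is the IVA (interruption vector address); interruptions
         * now vector through the fault handler selected above. */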

        b               aligned_rfi  /* Prepare to RFI! Man all the cannons! */
        nop

        .align 128
aligned_rfi:
        pcxt_ssm_bug

        copy            %r3, %arg0      /* PDCE_PROC for smp_callin() */

        rsm             PSW_SM_QUIET,%r0        /* off troublesome PSW bits */
        /* Don't need NOPs, have 8 compliant insn before rfi */

        mtctl           %r0,%cr17       /* Clear IIASQ tail */
        mtctl           %r0,%cr17       /* Clear IIASQ head */

        /* Load RFI target into PC queue */
        mtctl           %r11,%cr18      /* IIAOQ head */
        ldo             4(%r11),%r11
        mtctl           %r11,%cr18      /* IIAOQ tail */

        load32          KERNEL_PSW,%r10
        mtctl           %r10,%ipsw

        tovirt_r1       %sp
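        /* rfi reloads the PSW from %ipsw and resumes at the front of the
         * interruption instruction address queues, so the head/tail loaded
         * above make execution continue at the RFI target with KERNEL_PSW
         * (virtual mode) in effect. */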

        /* Jump through hyperspace to Virt Mode */
        rfi
        nop

        .procend

#ifdef CONFIG_SMP

        .import smp_init_current_idle_task,data
        .import smp_callin,code

#ifndef CONFIG_64BIT
smp_callin_rtn:
        .proc
        .callinfo
        break   1,1             /*  Break if returned from start_secondary */
        nop
        nop
        .procend
#endif /*!CONFIG_64BIT*/

/***************************************************************************
* smp_slave_stext is executed by all non-monarch Processors when the Monarch
* pokes the slave CPUs in smp.c:smp_boot_cpus().
*
* Once here, register values are initialized in order to branch to virtual
* mode. Once all available/eligible CPUs are in virtual mode, all are
* released and start out by executing their own idle task.
*****************************************************************************/
smp_slave_stext:
        .proc
        .callinfo

        /*
        ** Initialize Space registers
        */
        mtsp       %r0,%sr4
        mtsp       %r0,%sr5
        mtsp       %r0,%sr6
        mtsp       %r0,%sr7

#ifdef CONFIG_64BIT
        /*
         *  Enable Wide mode early, in case the task_struct for the idle
         *  task in smp_init_current_idle_task was allocated above 4GB.
         */
1:      mfia            %rp             /* clear upper part of pcoq */
        ldo             2f-1b(%rp),%rp
        depdi           0,31,32,%rp
        bv              (%rp)
        ssm             PSW_SM_W,%r0
2:
#endif

        /*  Initialize the SP - monarch sets up smp_init_current_idle_task */
        load32          PA(smp_init_current_idle_task),%r6
        LDREG           0(%r6),%r6
        mtctl           %r6,%cr30
        tophys_r1       %r6
        LDREG           TASK_STACK(%r6),%sp
        tophys_r1       %sp
        ldo             FRAME_SIZE(%sp),%sp

        /* point CPU to kernel page tables */
        load32          PA(swapper_pg_dir),%r4
        mtctl           %r4,%cr24       /* Initialize kernel root pointer */
        mtctl           %r4,%cr25       /* Initialize user root pointer */

#ifdef CONFIG_64BIT
        /* Setup PDCE_PROC entry */
        copy            %arg0,%r3
#else
        /* Load RFI *return* address in case smp_callin bails */
        load32          smp_callin_rtn,%r2
#endif

        /* Load RFI target address.  */
        load32          smp_callin,%r11

        /* ok...common code can handle the rest */
        b               common_stext
        nop

        .procend
#endif /* CONFIG_SMP */

#ifndef CONFIG_64BIT
        .section .data..ro_after_init

        .align  4
        .export $global$,data

        .type   $global$,@object
        .size   $global$,4
$global$:
        .word 0
#endif /*!CONFIG_64BIT*/