/* arch/parisc/kernel/head.S — scraped source listing.
 * First copy below is from kernel v3.5.6; a second copy of the same file,
 * as of kernel v4.17, follows the "v4.17" divider further down. */
  1/* This file is subject to the terms and conditions of the GNU General Public
  2 * License.  See the file "COPYING" in the main directory of this archive
  3 * for more details.
  4 *
  5 * Copyright (C) 1999-2007 by Helge Deller <deller@gmx.de>
  6 * Copyright 1999 SuSE GmbH (Philipp Rumpf)
  7 * Copyright 1999 Philipp Rumpf (prumpf@tux.org)
  8 * Copyright 2000 Hewlett Packard (Paul Bame, bame@puffin.external.hp.com)
  9 * Copyright (C) 2001 Grant Grundler (Hewlett Packard)
 10 * Copyright (C) 2004 Kyle McMartin <kyle@debian.org>
 11 *
 12 * Initial Version 04-23-1999 by Helge Deller <deller@gmx.de>
 13 */
 14
 15#include <asm/asm-offsets.h>
 16#include <asm/psw.h>
 17#include <asm/pdc.h>
 18	
 19#include <asm/assembly.h>
 20#include <asm/pgtable.h>
 21
 22#include <linux/linkage.h>
 23#include <linux/init.h>
 24
	.level	LEVEL	/* 1.1 or 2.0w, per CONFIG_64BIT (see asm/assembly.h) */

	/* Storage for the four 32-bit arguments the boot loader passes in
	 * (%arg0-%arg3); the kernel entry code stores them here before it
	 * touches those registers.  Lives in init data, so it is discarded
	 * once boot is complete. */
	__INITDATA
ENTRY(boot_args)
	.word 0 /* arg0 */
	.word 0 /* arg1 */
	.word 0 /* arg2 */
	.word 0 /* arg3 */
END(boot_args)
 34
 35	__HEAD
 36
 37	.align	4
 38	.import init_thread_union,data
 39	.import fault_vector_20,code    /* IVA parisc 2.0 32 bit */
 40#ifndef CONFIG_64BIT
 41        .import fault_vector_11,code    /* IVA parisc 1.1 32 bit */
 42	.import	$global$		/* forward declaration */
 43#endif /*!CONFIG_64BIT*/
 44	.export _stext,data		/* Kernel want it this way! */
 45_stext:
 46ENTRY(stext)
 47	.proc
 48	.callinfo
 49
 50	/* Make sure sr4-sr7 are set to zero for the kernel address space */
 51	mtsp	%r0,%sr4
 52	mtsp	%r0,%sr5
 53	mtsp	%r0,%sr6
 54	mtsp	%r0,%sr7
 55
 56	/* Clear BSS (shouldn't the boot loader do this?) */
 57
 58	.import __bss_start,data
 59	.import __bss_stop,data
 60
 61	load32		PA(__bss_start),%r3
 62	load32		PA(__bss_stop),%r4
 63$bss_loop:
 64	cmpb,<<,n       %r3,%r4,$bss_loop
 65	stw,ma          %r0,4(%r3)
 66
 67	/* Save away the arguments the boot loader passed in (32 bit args) */
 68	load32		PA(boot_args),%r1
 69	stw,ma          %arg0,4(%r1)
 70	stw,ma          %arg1,4(%r1)
 71	stw,ma          %arg2,4(%r1)
 72	stw,ma          %arg3,4(%r1)
 73
 74	/* Initialize startup VM. Just map first 8/16 MB of memory */
 75	load32		PA(swapper_pg_dir),%r4
 76	mtctl		%r4,%cr24	/* Initialize kernel root pointer */
 77	mtctl		%r4,%cr25	/* Initialize user root pointer */
 78
 79#if PT_NLEVELS == 3
 80	/* Set pmd in pgd */
 81	load32		PA(pmd0),%r5
 82	shrd            %r5,PxD_VALUE_SHIFT,%r3	
 83	ldo		(PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3
 84	stw		%r3,ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4)
 85	ldo		ASM_PMD_ENTRY*ASM_PMD_ENTRY_SIZE(%r5),%r4
 86#else
 87	/* 2-level page table, so pmd == pgd */
 88	ldo		ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4),%r4
 89#endif
 90
 91	/* Fill in pmd with enough pte directories */
 92	load32		PA(pg0),%r1
 93	SHRREG		%r1,PxD_VALUE_SHIFT,%r3
 94	ldo		(PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3
 95
 96	ldi		ASM_PT_INITIAL,%r1
 97
 981:
 99	stw		%r3,0(%r4)
100	ldo		(PAGE_SIZE >> PxD_VALUE_SHIFT)(%r3),%r3
101	addib,>		-1,%r1,1b
102#if PT_NLEVELS == 3
103	ldo             ASM_PMD_ENTRY_SIZE(%r4),%r4
104#else
105	ldo             ASM_PGD_ENTRY_SIZE(%r4),%r4
106#endif
107
108
109	/* Now initialize the PTEs themselves.  We use RWX for
110	 * everything ... it will get remapped correctly later */
111	ldo		0+_PAGE_KERNEL_RWX(%r0),%r3 /* Hardwired 0 phys addr start */
112	ldi		(1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */
113	load32		PA(pg0),%r1
114
115$pgt_fill_loop:
116	STREGM          %r3,ASM_PTE_ENTRY_SIZE(%r1)
117	ldo		(1<<PFN_PTE_SHIFT)(%r3),%r3 /* add one PFN */
118	addib,>		-1,%r11,$pgt_fill_loop
119	nop
120
121	/* Load the return address...er...crash 'n burn */
122	copy		%r0,%r2
123
124	/* And the RFI Target address too */
125	load32		start_parisc,%r11
126
127	/* And the initial task pointer */
128	load32		init_thread_union,%r6
129	mtctl           %r6,%cr30
130
131	/* And the stack pointer too */
132	ldo             THREAD_SZ_ALGN(%r6),%sp
133
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
134#ifdef CONFIG_SMP
135	/* Set the smp rendezvous address into page zero.
136	** It would be safer to do this in init_smp_config() but
137	** it's just way easier to deal with here because
138	** of 64-bit function ptrs and the address is local to this file.
139	*/
140	load32		PA(smp_slave_stext),%r10
141	stw		%r10,0x10(%r0)	/* MEM_RENDEZ */
142	stw		%r0,0x28(%r0)	/* MEM_RENDEZ_HI - assume addr < 4GB */
143
144	/* FALLTHROUGH */
145	.procend
146
147	/*
148	** Code Common to both Monarch and Slave processors.
149	** Entry:
150	**
151	**  1.1:	
152	**    %r11 must contain RFI target address.
153	**    %r25/%r26 args to pass to target function
154	**    %r2  in case rfi target decides it didn't like something
155	**
156	**  2.0w:
157	**    %r3  PDCE_PROC address
158	**    %r11 RFI target address
159	**
160	** Caller must init: SR4-7, %sp, %r10, %cr24/25, 
161	*/
162common_stext:
163	.proc
164	.callinfo
165#else
166	/* Clear PDC entry point - we won't use it */
167	stw		%r0,0x10(%r0)	/* MEM_RENDEZ */
168	stw		%r0,0x28(%r0)	/* MEM_RENDEZ_HI */
169#endif /*CONFIG_SMP*/
170
171#ifdef CONFIG_64BIT
172	tophys_r1	%sp
173
174	/* Save the rfi target address */
175	ldd             TI_TASK-THREAD_SZ_ALGN(%sp), %r10
176	tophys_r1       %r10
177	std             %r11,  TASK_PT_GR11(%r10)
178	/* Switch to wide mode Superdome doesn't support narrow PDC
179	** calls.
180	*/
1811:	mfia            %rp             /* clear upper part of pcoq */
182	ldo             2f-1b(%rp),%rp
183	depdi           0,31,32,%rp
184	bv              (%rp)
185	ssm             PSW_SM_W,%r0
186
187        /* Set Wide mode as the "Default" (eg for traps)
188        ** First trap occurs *right* after (or part of) rfi for slave CPUs.
189        ** Someday, palo might not do this for the Monarch either.
190        */
1912:
192#define MEM_PDC_LO 0x388
193#define MEM_PDC_HI 0x35C
194	ldw             MEM_PDC_LO(%r0),%r3
195	ldw             MEM_PDC_HI(%r0),%r6
196	depd            %r6, 31, 32, %r3        /* move to upper word */
197
198	ldo             PDC_PSW(%r0),%arg0              /* 21 */
199	ldo             PDC_PSW_SET_DEFAULTS(%r0),%arg1 /* 2 */
200	ldo             PDC_PSW_WIDE_BIT(%r0),%arg2     /* 2 */
201	load32          PA(stext_pdc_ret), %rp
202	bv              (%r3)
203	copy            %r0,%arg3
204
205stext_pdc_ret:
 
 
206	/* restore rfi target address*/
207	ldd             TI_TASK-THREAD_SZ_ALGN(%sp), %r10
208	tophys_r1       %r10
209	ldd             TASK_PT_GR11(%r10), %r11
210	tovirt_r1       %sp
211#endif
212	
213	/* PARANOID: clear user scratch/user space SR's */
214	mtsp	%r0,%sr0
215	mtsp	%r0,%sr1
216	mtsp	%r0,%sr2
217	mtsp	%r0,%sr3
218
219	/* Initialize Protection Registers */
220	mtctl	%r0,%cr8
221	mtctl	%r0,%cr9
222	mtctl	%r0,%cr12
223	mtctl	%r0,%cr13
224
225	/* Initialize the global data pointer */
226	loadgp
227
228	/* Set up our interrupt table.  HPMCs might not work after this! 
229	 *
230	 * We need to install the correct iva for PA1.1 or PA2.0. The
231	 * following short sequence of instructions can determine this
232	 * (without being illegal on a PA1.1 machine).
233	 */
234#ifndef CONFIG_64BIT
235	ldi		32,%r10
236	mtctl		%r10,%cr11
237	.level 2.0
238	mfctl,w		%cr11,%r10
239	.level 1.1
240	comib,<>,n	0,%r10,$is_pa20
241	ldil		L%PA(fault_vector_11),%r10
242	b		$install_iva
243	ldo		R%PA(fault_vector_11)(%r10),%r10
244
245$is_pa20:
246	.level		LEVEL /* restore 1.1 || 2.0w */
247#endif /*!CONFIG_64BIT*/
248	load32		PA(fault_vector_20),%r10
249
250$install_iva:
251	mtctl		%r10,%cr14
252
253	b		aligned_rfi  /* Prepare to RFI! Man all the cannons! */
254	nop
255
256	.align 128
257aligned_rfi:
258	pcxt_ssm_bug
259
 
 
260	rsm		PSW_SM_QUIET,%r0	/* off troublesome PSW bits */
261	/* Don't need NOPs, have 8 compliant insn before rfi */
262
263	mtctl		%r0,%cr17	/* Clear IIASQ tail */
264	mtctl		%r0,%cr17	/* Clear IIASQ head */
265
266	/* Load RFI target into PC queue */
267	mtctl		%r11,%cr18	/* IIAOQ head */
268	ldo		4(%r11),%r11
269	mtctl		%r11,%cr18	/* IIAOQ tail */
270
271	load32		KERNEL_PSW,%r10
272	mtctl		%r10,%ipsw
273	
274	/* Jump through hyperspace to Virt Mode */
275	rfi
276	nop
277
278	.procend
279
280#ifdef CONFIG_SMP
281
282	.import smp_init_current_idle_task,data
283	.import	smp_callin,code
284
285#ifndef CONFIG_64BIT
286smp_callin_rtn:
287        .proc
288	.callinfo
289	break	1,1		/*  Break if returned from start_secondary */
290	nop
291	nop
292        .procend
293#endif /*!CONFIG_64BIT*/
294
295/***************************************************************************
296* smp_slave_stext is executed by all non-monarch Processors when the Monarch
297* pokes the slave CPUs in smp.c:smp_boot_cpus().
298*
299* Once here, registers values are initialized in order to branch to virtual
300* mode. Once all available/eligible CPUs are in virtual mode, all are
301* released and start out by executing their own idle task.
302*****************************************************************************/
303smp_slave_stext:
304        .proc
305	.callinfo
306
307	/*
308	** Initialize Space registers
309	*/
310	mtsp	   %r0,%sr4
311	mtsp	   %r0,%sr5
312	mtsp	   %r0,%sr6
313	mtsp	   %r0,%sr7
314
315	/*  Initialize the SP - monarch sets up smp_init_current_idle_task */
316	load32		PA(smp_init_current_idle_task),%sp
317	LDREG		0(%sp),%sp	/* load task address */
318	tophys_r1	%sp
319	LDREG		TASK_THREAD_INFO(%sp),%sp
320	mtctl           %sp,%cr30       /* store in cr30 */
321	ldo             THREAD_SZ_ALGN(%sp),%sp
322
323	/* point CPU to kernel page tables */
324	load32		PA(swapper_pg_dir),%r4
325	mtctl		%r4,%cr24	/* Initialize kernel root pointer */
326	mtctl		%r4,%cr25	/* Initialize user root pointer */
327
328#ifdef CONFIG_64BIT
329	/* Setup PDCE_PROC entry */
330	copy            %arg0,%r3
331#else
332	/* Load RFI *return* address in case smp_callin bails */
333	load32		smp_callin_rtn,%r2
334#endif
335	
336	/* Load RFI target address.  */
337	load32		smp_callin,%r11
338	
339	/* ok...common code can handle the rest */
340	b		common_stext
341	nop
342
343	.procend
344#endif /* CONFIG_SMP */
345
346ENDPROC(stext)
347
#ifndef CONFIG_64BIT
	.section .data..read_mostly

	.align	4
	/* $global$: initial value for the global data pointer on 32-bit
	 * kernels (loadgp above refers to it via the forward .import);
	 * 64-bit kernels don't need this symbol. */
	.export	$global$,data

	.type	$global$,@object
	.size	$global$,4
$global$:
	.word 0
#endif /*!CONFIG_64BIT*/
/* ===== end of v3.5.6 copy — same file (arch/parisc/kernel/head.S) as of kernel v4.17 follows ===== */
  1/* This file is subject to the terms and conditions of the GNU General Public
  2 * License.  See the file "COPYING" in the main directory of this archive
  3 * for more details.
  4 *
  5 * Copyright (C) 1999-2007 by Helge Deller <deller@gmx.de>
  6 * Copyright 1999 SuSE GmbH (Philipp Rumpf)
  7 * Copyright 1999 Philipp Rumpf (prumpf@tux.org)
  8 * Copyright 2000 Hewlett Packard (Paul Bame, bame@puffin.external.hp.com)
  9 * Copyright (C) 2001 Grant Grundler (Hewlett Packard)
 10 * Copyright (C) 2004 Kyle McMartin <kyle@debian.org>
 11 *
 12 * Initial Version 04-23-1999 by Helge Deller <deller@gmx.de>
 13 */
 14
 15#include <asm/asm-offsets.h>
 16#include <asm/psw.h>
 17#include <asm/pdc.h>
 18	
 19#include <asm/assembly.h>
 20#include <asm/pgtable.h>
 21
 22#include <linux/linkage.h>
 23#include <linux/init.h>
 24
	.level	LEVEL	/* 1.1 or 2.0w, per CONFIG_64BIT (see asm/assembly.h) */

	/* Storage for the four 32-bit arguments the boot loader passes in
	 * (%arg0-%arg3); the kernel entry code stores them here before it
	 * touches those registers.  Lives in init data, so it is discarded
	 * once boot is complete. */
	__INITDATA
ENTRY(boot_args)
	.word 0 /* arg0 */
	.word 0 /* arg1 */
	.word 0 /* arg2 */
	.word 0 /* arg3 */
END(boot_args)
 34
 35	__HEAD
 36
 37	.align	4
 38	.import init_thread_union,data
 39	.import fault_vector_20,code    /* IVA parisc 2.0 32 bit */
 40#ifndef CONFIG_64BIT
 41        .import fault_vector_11,code    /* IVA parisc 1.1 32 bit */
 42	.import	$global$		/* forward declaration */
 43#endif /*!CONFIG_64BIT*/
 44ENTRY(parisc_kernel_start)
 
 
 45	.proc
 46	.callinfo
 47
 48	/* Make sure sr4-sr7 are set to zero for the kernel address space */
 49	mtsp	%r0,%sr4
 50	mtsp	%r0,%sr5
 51	mtsp	%r0,%sr6
 52	mtsp	%r0,%sr7
 53
 54	/* Clear BSS (shouldn't the boot loader do this?) */
 55
 56	.import __bss_start,data
 57	.import __bss_stop,data
 58
 59	load32		PA(__bss_start),%r3
 60	load32		PA(__bss_stop),%r4
 61$bss_loop:
 62	cmpb,<<,n       %r3,%r4,$bss_loop
 63	stw,ma          %r0,4(%r3)
 64
 65	/* Save away the arguments the boot loader passed in (32 bit args) */
 66	load32		PA(boot_args),%r1
 67	stw,ma          %arg0,4(%r1)
 68	stw,ma          %arg1,4(%r1)
 69	stw,ma          %arg2,4(%r1)
 70	stw,ma          %arg3,4(%r1)
 71
 72	/* Initialize startup VM. Just map first 16/32 MB of memory */
 73	load32		PA(swapper_pg_dir),%r4
 74	mtctl		%r4,%cr24	/* Initialize kernel root pointer */
 75	mtctl		%r4,%cr25	/* Initialize user root pointer */
 76
 77#if CONFIG_PGTABLE_LEVELS == 3
 78	/* Set pmd in pgd */
 79	load32		PA(pmd0),%r5
 80	shrd            %r5,PxD_VALUE_SHIFT,%r3	
 81	ldo		(PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3
 82	stw		%r3,ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4)
 83	ldo		ASM_PMD_ENTRY*ASM_PMD_ENTRY_SIZE(%r5),%r4
 84#else
 85	/* 2-level page table, so pmd == pgd */
 86	ldo		ASM_PGD_ENTRY*ASM_PGD_ENTRY_SIZE(%r4),%r4
 87#endif
 88
 89	/* Fill in pmd with enough pte directories */
 90	load32		PA(pg0),%r1
 91	SHRREG		%r1,PxD_VALUE_SHIFT,%r3
 92	ldo		(PxD_FLAG_PRESENT+PxD_FLAG_VALID)(%r3),%r3
 93
 94	ldi		ASM_PT_INITIAL,%r1
 95
 961:
 97	stw		%r3,0(%r4)
 98	ldo		(PAGE_SIZE >> PxD_VALUE_SHIFT)(%r3),%r3
 99	addib,>		-1,%r1,1b
100#if CONFIG_PGTABLE_LEVELS == 3
101	ldo             ASM_PMD_ENTRY_SIZE(%r4),%r4
102#else
103	ldo             ASM_PGD_ENTRY_SIZE(%r4),%r4
104#endif
105
106
107	/* Now initialize the PTEs themselves.  We use RWX for
108	 * everything ... it will get remapped correctly later */
109	ldo		0+_PAGE_KERNEL_RWX(%r0),%r3 /* Hardwired 0 phys addr start */
110	load32		(1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */
111	load32		PA(pg0),%r1
112
113$pgt_fill_loop:
114	STREGM          %r3,ASM_PTE_ENTRY_SIZE(%r1)
115	ldo		(1<<PFN_PTE_SHIFT)(%r3),%r3 /* add one PFN */
116	addib,>		-1,%r11,$pgt_fill_loop
117	nop
118
119	/* Load the return address...er...crash 'n burn */
120	copy		%r0,%r2
121
122	/* And the RFI Target address too */
123	load32		start_parisc,%r11
124
125	/* And the initial task pointer */
126	load32		init_thread_union,%r6
127	mtctl           %r6,%cr30
128
129	/* And the stack pointer too */
130	ldo             THREAD_SZ_ALGN(%r6),%sp
131
132#if defined(CONFIG_64BIT) && defined(CONFIG_FUNCTION_TRACER)
133	.import _mcount,data
134	/* initialize mcount FPTR */
135	/* Get the global data pointer */
136	loadgp
137	load32		PA(_mcount), %r10
138	std		%dp,0x18(%r10)
139#endif
140
141#ifdef CONFIG_64BIT
142	/* Get PDCE_PROC for monarch CPU. */
143#define MEM_PDC_LO 0x388
144#define MEM_PDC_HI 0x35C
145	ldw             MEM_PDC_LO(%r0),%r3
146	ldw             MEM_PDC_HI(%r0),%r10
147	depd            %r10, 31, 32, %r3        /* move to upper word */
148#endif
149
150
151#ifdef CONFIG_SMP
152	/* Set the smp rendezvous address into page zero.
153	** It would be safer to do this in init_smp_config() but
154	** it's just way easier to deal with here because
155	** of 64-bit function ptrs and the address is local to this file.
156	*/
157	load32		PA(smp_slave_stext),%r10
158	stw		%r10,0x10(%r0)	/* MEM_RENDEZ */
159	stw		%r0,0x28(%r0)	/* MEM_RENDEZ_HI - assume addr < 4GB */
160
161	/* FALLTHROUGH */
162	.procend
163
164	/*
165	** Code Common to both Monarch and Slave processors.
166	** Entry:
167	**
168	**  1.1:	
169	**    %r11 must contain RFI target address.
170	**    %r25/%r26 args to pass to target function
171	**    %r2  in case rfi target decides it didn't like something
172	**
173	**  2.0w:
174	**    %r3  PDCE_PROC address
175	**    %r11 RFI target address
176	**
177	** Caller must init: SR4-7, %sp, %r10, %cr24/25, 
178	*/
179common_stext:
180	.proc
181	.callinfo
182#else
183	/* Clear PDC entry point - we won't use it */
184	stw		%r0,0x10(%r0)	/* MEM_RENDEZ */
185	stw		%r0,0x28(%r0)	/* MEM_RENDEZ_HI */
186#endif /*CONFIG_SMP*/
187
188#ifdef CONFIG_64BIT
189	tophys_r1	%sp
190
191	/* Save the rfi target address */
192	ldd             TI_TASK-THREAD_SZ_ALGN(%sp), %r10
193	tophys_r1       %r10
194	std             %r11,  TASK_PT_GR11(%r10)
195	/* Switch to wide mode Superdome doesn't support narrow PDC
196	** calls.
197	*/
1981:	mfia            %rp             /* clear upper part of pcoq */
199	ldo             2f-1b(%rp),%rp
200	depdi           0,31,32,%rp
201	bv              (%rp)
202	ssm             PSW_SM_W,%r0
203
204        /* Set Wide mode as the "Default" (eg for traps)
205        ** First trap occurs *right* after (or part of) rfi for slave CPUs.
206        ** Someday, palo might not do this for the Monarch either.
207        */
2082:
209	mfctl		%cr30,%r6		/* PCX-W2 firmware bug */
 
 
 
 
210
211	ldo             PDC_PSW(%r0),%arg0              /* 21 */
212	ldo             PDC_PSW_SET_DEFAULTS(%r0),%arg1 /* 2 */
213	ldo             PDC_PSW_WIDE_BIT(%r0),%arg2     /* 2 */
214	load32          PA(stext_pdc_ret), %rp
215	bv              (%r3)
216	copy            %r0,%arg3
217
218stext_pdc_ret:
219	mtctl		%r6,%cr30		/* restore task thread info */
220
221	/* restore rfi target address*/
222	ldd             TI_TASK-THREAD_SZ_ALGN(%sp), %r10
223	tophys_r1       %r10
224	ldd             TASK_PT_GR11(%r10), %r11
225	tovirt_r1       %sp
226#endif
227	
228	/* PARANOID: clear user scratch/user space SR's */
229	mtsp	%r0,%sr0
230	mtsp	%r0,%sr1
231	mtsp	%r0,%sr2
232	mtsp	%r0,%sr3
233
234	/* Initialize Protection Registers */
235	mtctl	%r0,%cr8
236	mtctl	%r0,%cr9
237	mtctl	%r0,%cr12
238	mtctl	%r0,%cr13
239
240	/* Initialize the global data pointer */
241	loadgp
242
243	/* Set up our interrupt table.  HPMCs might not work after this! 
244	 *
245	 * We need to install the correct iva for PA1.1 or PA2.0. The
246	 * following short sequence of instructions can determine this
247	 * (without being illegal on a PA1.1 machine).
248	 */
249#ifndef CONFIG_64BIT
250	ldi		32,%r10
251	mtctl		%r10,%cr11
252	.level 2.0
253	mfctl,w		%cr11,%r10
254	.level 1.1
255	comib,<>,n	0,%r10,$is_pa20
256	ldil		L%PA(fault_vector_11),%r10
257	b		$install_iva
258	ldo		R%PA(fault_vector_11)(%r10),%r10
259
260$is_pa20:
261	.level		LEVEL /* restore 1.1 || 2.0w */
262#endif /*!CONFIG_64BIT*/
263	load32		PA(fault_vector_20),%r10
264
265$install_iva:
266	mtctl		%r10,%cr14
267
268	b		aligned_rfi  /* Prepare to RFI! Man all the cannons! */
269	nop
270
271	.align 128
272aligned_rfi:
273	pcxt_ssm_bug
274
275	copy		%r3, %arg0	/* PDCE_PROC for smp_callin() */
276
277	rsm		PSW_SM_QUIET,%r0	/* off troublesome PSW bits */
278	/* Don't need NOPs, have 8 compliant insn before rfi */
279
280	mtctl		%r0,%cr17	/* Clear IIASQ tail */
281	mtctl		%r0,%cr17	/* Clear IIASQ head */
282
283	/* Load RFI target into PC queue */
284	mtctl		%r11,%cr18	/* IIAOQ head */
285	ldo		4(%r11),%r11
286	mtctl		%r11,%cr18	/* IIAOQ tail */
287
288	load32		KERNEL_PSW,%r10
289	mtctl		%r10,%ipsw
290	
291	/* Jump through hyperspace to Virt Mode */
292	rfi
293	nop
294
295	.procend
296
297#ifdef CONFIG_SMP
298
299	.import smp_init_current_idle_task,data
300	.import	smp_callin,code
301
302#ifndef CONFIG_64BIT
303smp_callin_rtn:
304        .proc
305	.callinfo
306	break	1,1		/*  Break if returned from start_secondary */
307	nop
308	nop
309        .procend
310#endif /*!CONFIG_64BIT*/
311
312/***************************************************************************
313* smp_slave_stext is executed by all non-monarch Processors when the Monarch
314* pokes the slave CPUs in smp.c:smp_boot_cpus().
315*
316* Once here, registers values are initialized in order to branch to virtual
317* mode. Once all available/eligible CPUs are in virtual mode, all are
318* released and start out by executing their own idle task.
319*****************************************************************************/
320smp_slave_stext:
321        .proc
322	.callinfo
323
324	/*
325	** Initialize Space registers
326	*/
327	mtsp	   %r0,%sr4
328	mtsp	   %r0,%sr5
329	mtsp	   %r0,%sr6
330	mtsp	   %r0,%sr7
331
332	/*  Initialize the SP - monarch sets up smp_init_current_idle_task */
333	load32		PA(smp_init_current_idle_task),%sp
334	LDREG		0(%sp),%sp	/* load task address */
335	tophys_r1	%sp
336	LDREG		TASK_THREAD_INFO(%sp),%sp
337	mtctl           %sp,%cr30       /* store in cr30 */
338	ldo             THREAD_SZ_ALGN(%sp),%sp
339
340	/* point CPU to kernel page tables */
341	load32		PA(swapper_pg_dir),%r4
342	mtctl		%r4,%cr24	/* Initialize kernel root pointer */
343	mtctl		%r4,%cr25	/* Initialize user root pointer */
344
345#ifdef CONFIG_64BIT
346	/* Setup PDCE_PROC entry */
347	copy            %arg0,%r3
348#else
349	/* Load RFI *return* address in case smp_callin bails */
350	load32		smp_callin_rtn,%r2
351#endif
352	
353	/* Load RFI target address.  */
354	load32		smp_callin,%r11
355	
356	/* ok...common code can handle the rest */
357	b		common_stext
358	nop
359
360	.procend
361#endif /* CONFIG_SMP */
362
363ENDPROC(parisc_kernel_start)
364
#ifndef CONFIG_64BIT
	.section .data..read_mostly

	.align	4
	/* $global$: initial value for the global data pointer on 32-bit
	 * kernels (loadgp above refers to it via the forward .import);
	 * 64-bit kernels don't need this symbol. */
	.export	$global$,data

	.type	$global$,@object
	.size	$global$,4
$global$:
	.word 0
#endif /*!CONFIG_64BIT*/