arch/hexagon/kernel/head.S — early kernel startup code for Hexagon.
Two captured versions of this file follow: Linux v6.2 (immediately below),
then Linux v3.5.6.
  1/* SPDX-License-Identifier: GPL-2.0-only */
  2/*
  3 * Early kernel startup code for Hexagon
  4 *
  5 * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  6 */
  7
  8#include <linux/linkage.h>
  9#include <linux/init.h>
 10#include <asm/asm-offsets.h>
 11#include <asm/mem-layout.h>
 12#include <asm/vm_mmu.h>
 13#include <asm/page.h>
 14#include <asm/hexagon_vm.h>
 15
/*
 * Number of 4MB page-table entries written for the kernel segment at
 * PAGE_OFFSET: 0x0e0 == 224 entries -> 896MB of address space.
 */
 16#define SEGTABLE_ENTRIES #0x0e0
 17
 18	__INIT
 19ENTRY(stext)
	/*
	 * Kernel entry point.  The Hexagon virtual machine monitor (VMM)
	 * has already set up the vector page and MMU; no kernel mappings
	 * exist yet.  Register roles established below and relied upon
	 * through the rest of this function:
	 *   r24 - __pa(swapper_pg_dir), physical address of the initial
	 *         page directory
	 *   r25 - PHYS_OFFSET, the physical load address rounded down to
	 *         a 4MB boundary (mask 0xffc00000 == -4MB)
	 *   r26 - number of 4MB VA==PA entries covering stext.._end
	 */
 20	/*
 21	 * VMM will already have set up true vector page, MMU, etc.
 22	 * To set up initial kernel identity map, we have to pass
 23	 * the VMM a pointer to some canonical page tables. In
 24	 * this implementation, we're assuming that we've got
 25	 * them precompiled. Generate value in R24, as we'll need
 26	 * it again shortly.
 27	 */
 28	r24.L = #LO(swapper_pg_dir)
 29	r24.H = #HI(swapper_pg_dir)
 30
 31	/*
 32	 * Symbol is kernel segment address, but we need
 33	 * the logical/physical address.
 34	 */
 35	r25 = pc;
 36	r2.h = #0xffc0;
 37	r2.l = #0x0000;
 38	r25 = and(r2,r25);	/*  R25 holds PHYS_OFFSET now  */
 39	r1.h = #HI(PAGE_OFFSET);
 40	r1.l = #LO(PAGE_OFFSET);
 41	r24 = sub(r24,r1);	/* swapper_pg_dir - PAGE_OFFSET */
 42	r24 = add(r24,r25);	/* + PHYS_OFFSET */
 43
 44	r0 = r24;  /* aka __pa(swapper_pg_dir)  */
 45
 46	/*
 47	 * Initialize page dir to make the virtual and physical
 48	 * addresses where the kernel was loaded be identical.
 49	 * Done in 4MB chunks.
 50	 */
 51#define PTE_BITS ( __HVM_PTE_R | __HVM_PTE_W | __HVM_PTE_X	\
 52		  | __HEXAGON_C_WB_L2 << 6			\
 53		  | __HVM_PDE_S_4MB)
 54
 55	/*
 56	 * Get number of VA=PA entries; only really needed for jump
 57	 * to hyperspace; gets blown away immediately after
 58	 */
 59
	/*  r1 = _end, r2 = stext, r3 = 4MB - 1 (rounding addend)  */
 60	{
 61		r1.l = #LO(_end);
 62		r2.l = #LO(stext);
 63		r3 = #1;
 64	}
 65	{
 66		r1.h = #HI(_end);
 67		r2.h = #HI(stext);
 68		r3 = asl(r3, #22);	/* r3 = 1 << 22 = 4MB */
 69	}
 70	{
 71		r1 = sub(r1, r2);
 72		r3 = add(r3, #-1);
 73	}  /* r1 =  _end - stext  */
 74	r1 = add(r1, r3);  /*  + (4M-1) */
 75	r26 = lsr(r1, #22); /*  / 4M = # of entries */
 76
	/*  Write r26 identity (VA==PA) 4MB entries starting at the load address  */
 77	r1 = r25;
 78	r2.h = #0xffc0;
 79	r2.l = #0x0000;		/* round back down to 4MB boundary  */
 80	r1 = and(r1,r2);
 81	r2 = lsr(r1, #22)	/* 4MB page number		*/
 82	r2 = asl(r2, #2)	/* times sizeof(PTE) (4bytes)	*/
 83	r0 = add(r0,r2)		/* r0 = address of correct PTE	*/
 84	r2 = #PTE_BITS
 85	r1 = add(r1,r2)		/* r1 = 4MB PTE for the first entry	*/
 86	r2.h = #0x0040
 87	r2.l = #0x0000		/* 4MB increments */
 88	loop0(1f,r26);
 891:
 90	memw(r0 ++ #4) = r1
 91	{ r1 = add(r1, r2); } :endloop0
 92
 93	/*  Also need to overwrite the initial 0xc0000000 entries  */
 94	/*  PAGE_OFFSET >> (4MB shift - 4 bytes per entry shift)  */
 95	R1.H = #HI(PAGE_OFFSET >> (22 - 2))
 96	R1.L = #LO(PAGE_OFFSET >> (22 - 2))
 97
 98	r0 = add(r1, r24);	/* advance to 0xc0000000 entry */
 99	r1 = r25;
100	r2.h = #0xffc0;
101	r2.l = #0x0000;		/* round back down to 4MB boundary  */
102	r1 = and(r1,r2);	/* for huge page */
103	r2 = #PTE_BITS
104	r1 = add(r1,r2);
105	r2.h = #0x0040
106	r2.l = #0x0000		/* 4MB increments */
107
	/*  Map SEGTABLE_ENTRIES (224) 4MB pages of physical RAM at PAGE_OFFSET  */
108	loop0(1f,SEGTABLE_ENTRIES);
1091:
110	memw(r0 ++ #4) = r1;
111	{ r1 = add(r1,r2); } :endloop0
112
113	r0 = r24;
114
115	/*
116	 * The subroutine wrapper around the virtual instruction touches
117	 * no memory, so we should be able to use it even here.
118	 * Note that in this version, R1 and R2 get "clobbered"; see
119	 * vm_ops.S
120	 */
	/*  Hypervisor call: __vmnewmap(r0 = __pa(table), r1 = table format)  */
121	r1 = #VM_TRANS_TYPE_TABLE
122	call	__vmnewmap;
123
124	/*  Jump into virtual address range.  */
125
126	r31.h = #hi(__head_s_vaddr_target)
127	r31.l = #lo(__head_s_vaddr_target)
128	jumpr r31
129
130	/*  Insert trippy space effects.  */
131
132__head_s_vaddr_target:
133	/*
134	 * Tear down VA=PA translation now that we are running
135	 * in kernel virtual space.
136	 */
137	r0 = #__HVM_PDE_S_INVALID
138
139	r1.h = #0xffc0;
140	r1.l = #0x0000;
141	r2 = r25;		/* phys_offset */
142	r2 = and(r1,r2);
143
	/*  r1 = &swapper_pg_dir[phys_offset 4MB index]; invalidate r26 entries  */
144	r1.l = #lo(swapper_pg_dir)
145	r1.h = #hi(swapper_pg_dir)
146	r2 = lsr(r2, #22)	/* 4MB page number		*/
147	r2 = asl(r2, #2)	/* times sizeof(PTE) (4bytes)	*/
148	r1 = add(r1,r2);
149	loop0(1f,r26)
150
1511:
152	{
153		memw(R1 ++ #4) = R0
154	}:endloop0
155
	/*  Reinstall the map so the hypervisor picks up the invalidations  */
156	r0 = r24
157	r1 = #VM_TRANS_TYPE_TABLE
158	call __vmnewmap
159
160	/*  Go ahead and install the trap0 return so angel calls work  */
161	r0.h = #hi(_K_provisional_vec)
162	r0.l = #lo(_K_provisional_vec)
163	call __vmsetvec
164
165	/*
166	 * OK, at this point we should start to be much more careful,
167	 * we're going to enter C code and start touching memory
168	 * in all sorts of places.
169	 * This means:
170	 *      SGP needs to be OK
171	 *	Need to lock shared resources
172	 *	A bunch of other things that will cause
173	 * 	all kinds of painful bugs
174	 */
175
176	/*
177	 * Stack pointer should be pointed at the init task's
178	 * thread stack, which should have been declared in arch/init_task.c.
179	 * So uhhhhh...
180	 * It's accessible via the init_thread_union, which is a union
181	 * of a thread_info struct and a stack; of course, the top
182	 * of the stack is not for you.  The end of the stack
183	 * is simply init_thread_union + THREAD_SIZE.
184	 */
185
186	{r29.H = #HI(init_thread_union); r0.H = #HI(_THREAD_SIZE); }
187	{r29.L = #LO(init_thread_union); r0.L = #LO(_THREAD_SIZE); }
188
189	/*  initialize the register used to point to current_thread_info */
190	/*  Fixme:  THREADINFO_REG can't be R2 because of that memset thing. */
191	{r29 = add(r29,r0); THREADINFO_REG = r29; }
192
193	/*  Hack:  zero bss; */
	/*  memset(r0 = __bss_start, r1 = 0, r2 = __bss_stop - __bss_start)  */
194	{ r0.L = #LO(__bss_start);  r1 = #0; r2.l = #LO(__bss_stop); }
195	{ r0.H = #HI(__bss_start);           r2.h = #HI(__bss_stop); }
196
197	r2 = sub(r2,r0);
198	call memset;
199
200	/*  Set PHYS_OFFSET; should be in R25 */
201#ifdef CONFIG_HEXAGON_PHYS_OFFSET
202	r0.l = #LO(__phys_offset);
203	r0.h = #HI(__phys_offset);
204	memw(r0) = r25;
205#endif
206
207	/* Time to make the doughnuts.   */
208	call start_kernel
209
210	/*
211	 * Should not reach here.
212	 * start_kernel never returns; spin forever if it somehow does.
213	 */
2131:
214	jump 1b
215
/*
 * Page-aligned, page-sized buffer for a kernel command line supplied
 * from outside the kernel image.
 * NOTE(review): presumably filled in by the bootloader/VMM before
 * entry — confirm against the platform setup code.
 */
216.p2align PAGE_SHIFT
217ENTRY(external_cmdline_buffer)
218        .fill _PAGE_SIZE,1,0
219
/*
 * One page of zero bytes, page-aligned in .data: the kernel's
 * empty_zero_page used wherever a shared all-zeroes page is needed.
 */
220.data
221.p2align PAGE_SHIFT
222ENTRY(empty_zero_page)
223        .fill _PAGE_SIZE,1,0
The Linux v3.5.6 version of the same file (arch/hexagon/kernel/head.S)
follows below.
  1/*
  2 * Early kernel startup code for Hexagon
  3 *
  4 * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
  5 *
  6 *
  7 * This program is free software; you can redistribute it and/or modify
  8 * it under the terms of the GNU General Public License version 2 and
  9 * only version 2 as published by the Free Software Foundation.
 10 *
 11 * This program is distributed in the hope that it will be useful,
 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 14 * GNU General Public License for more details.
 15 *
 16 * You should have received a copy of the GNU General Public License
 17 * along with this program; if not, write to the Free Software
 18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 19 * 02110-1301, USA.
 20 */
 21
 22#include <linux/linkage.h>
 23#include <linux/init.h>
 24#include <asm/asm-offsets.h>
 25#include <asm/mem-layout.h>
 26#include <asm/vm_mmu.h>
 27#include <asm/page.h>
 
 
 
 28
 29	__INIT
 30ENTRY(stext)
	/*
	 * Kernel entry point (v3.5.6 variant).  Register roles:
	 *   r24 - physical-alias address of swapper_pg_dir (upper two
	 *         address bits cleared by the asl/lsr pair below)
	 * Unlike later versions, this variant assumes a fixed kernel
	 * segment rather than computing PHYS_OFFSET from the PC.
	 */
 31	/*
 32	 * VMM will already have set up true vector page, MMU, etc.
 33	 * To set up initial kernel identity map, we have to pass
 34	 * the VMM a pointer to some canonical page tables. In
 35	 * this implementation, we're assuming that we've got
 36	 * them precompiled. Generate value in R24, as we'll need
 37	 * it again shortly.
 38	 */
 39	r24.L = #LO(swapper_pg_dir)
 40	r24.H = #HI(swapper_pg_dir)
 41
 42	/*
 43	 * Symbol is kernel segment address, but we need
 44	 * the logical/physical address.
	 * Shifting left then right by 2 clears the top two address bits.
 45	 */
 46	r24 = asl(r24, #2)
 47	r24 = lsr(r24, #2)

 48
 49	r0 = r24
 50
 51	/*
 52	 * Initialize a 16MB PTE to make the virtual and physical
 53	 * addresses where the kernel was loaded be identical.
	 * NOTE(review): only two 4MB entries (8MB) are written below,
	 * which disagrees with the "16MB" above — confirm intent.
 54	 */
 55#define PTE_BITS ( __HVM_PTE_R | __HVM_PTE_W | __HVM_PTE_X	\
 56		  | __HEXAGON_C_WB_L2 << 6			\
 57		  | __HVM_PDE_S_4MB)
 58
 59	r1 = pc
 60	r2.H = #0xffc0
 61	r2.L = #0x0000
 62	r1 = and(r1,r2)		/* round PC to 4MB boundary	*/

 63	r2 = lsr(r1, #22)	/* 4MB page number		*/
 64	r2 = asl(r2, #2)	/* times sizeof(PTE) (4bytes)	*/
 65	r0 = add(r0,r2)		/* r0 = address of correct PTE	*/
 66	r2 = #PTE_BITS
 67	r1 = add(r1,r2)		/* r1 = 4MB PTE for the first entry	*/
 68	r2.h = #0x0040
 69	r2.l = #0x0000		/* 4MB	*/
	/*  Write two consecutive identity (VA==PA) 4MB entries  */
 70	memw(r0 ++ #4) = r1
 71	r1 = add(r1, r2)
 72	memw(r0 ++ #4) = r1

 73
 74	r0 = r24
 75
 76	/*
 77	 * The subroutine wrapper around the virtual instruction touches
 78	 * no memory, so we should be able to use it even here.
 79	 */
	/*  Hypervisor call: install new translation table, r0 = table addr  */
 80	call	__vmnewmap;
 81
 82	/*  Jump into virtual address range.  */
 83
 84	r31.h = #hi(__head_s_vaddr_target)
 85	r31.l = #lo(__head_s_vaddr_target)
 86	jumpr r31
 87
 88	/*  Insert trippy space effects.  */
 89
 90__head_s_vaddr_target:
 91	/*
 92	 * Tear down VA=PA translation now that we are running
 93	 * in the designated kernel segments.
	 * Invalidate the first 0x100 (256) page-directory entries,
	 * i.e. 256 * 4MB = the low 1GB of virtual address space.
 94	 */
 95	r0 = #__HVM_PDE_S_INVALID
 96	r1 = r24
 97	loop0(1f,#0x100)

 981:
 99	{
100		memw(R1 ++ #4) = R0
101	}:endloop0
102
	/*  Reinstall the map so the hypervisor picks up the invalidations  */
103	r0 = r24

104	call __vmnewmap
105
106	/*  Go ahead and install the trap0 return so angel calls work  */
107	r0.h = #hi(_K_provisional_vec)
108	r0.l = #lo(_K_provisional_vec)
109	call __vmsetvec
110
111	/*
112	 * OK, at this point we should start to be much more careful,
113	 * we're going to enter C code and start touching memory
114	 * in all sorts of places.
115	 * This means:
116	 *      SGP needs to be OK
117	 *	Need to lock shared resources
118	 *	A bunch of other things that will cause
119	 * 	all kinds of painful bugs
120	 */
121
122	/*
123	 * Stack pointer should be pointed at the init task's
124	 * thread stack, which should have been declared in arch/init_task.c.
125	 * So uhhhhh...
126	 * It's accessible via the init_thread_union, which is a union
127	 * of a thread_info struct and a stack; of course, the top
128	 * of the stack is not for you.  The end of the stack
129	 * is simply init_thread_union + THREAD_SIZE.
130	 */
131
132	{r29.H = #HI(init_thread_union); r0.H = #HI(_THREAD_SIZE); }
133	{r29.L = #LO(init_thread_union); r0.L = #LO(_THREAD_SIZE); }
134
135	/*  initialize the register used to point to current_thread_info */
136	/*  Fixme:  THREADINFO_REG can't be R2 because of that memset thing. */
137	{r29 = add(r29,r0); THREADINFO_REG = r29; }
138
139	/*  Hack:  zero bss; */
	/*  memset(r0 = __bss_start, r1 = 0, r2 = __bss_stop - __bss_start)  */
140	{ r0.L = #LO(__bss_start);  r1 = #0; r2.l = #LO(__bss_stop); }
141	{ r0.H = #HI(__bss_start);           r2.h = #HI(__bss_stop); }
142
143	r2 = sub(r2,r0);
144	call memset;

145
146	/* Time to make the doughnuts.   */
147	call start_kernel
148
149	/*
150	 * Should not reach here.
	 * start_kernel never returns; spin forever if it somehow does.
151	 */
1521:
153	jump 1b
154
/*
 * Page-aligned, page-sized buffer for a kernel command line supplied
 * from outside the kernel image.
 * NOTE(review): presumably filled in by the bootloader/VMM before
 * entry — confirm against the platform setup code.
 */
155.p2align PAGE_SHIFT
156ENTRY(external_cmdline_buffer)
157        .fill _PAGE_SIZE,1,0
158
/*
 * One page of zero bytes, page-aligned in .data: the kernel's
 * empty_zero_page used wherever a shared all-zeroes page is needed.
 */
159.data
160.p2align PAGE_SHIFT
161ENTRY(empty_zero_page)
162        .fill _PAGE_SIZE,1,0