Source listing: linux/arch/arm/mm/proc-mohawk.S — MMU functions for the
Marvell PJ1 "Mohawk" core.  Two kernel versions of the file follow:

v4.17 (first copy)
  1/*
  2 *  linux/arch/arm/mm/proc-mohawk.S: MMU functions for Marvell PJ1 core
  3 *
  4 *  PJ1 (codename Mohawk) is a hybrid of the xscale3 and Marvell's own core.
  5 *
  6 *  Heavily based on proc-arm926.S and proc-xsc3.S
  7 *
  8 * This program is free software; you can redistribute it and/or modify
  9 * it under the terms of the GNU General Public License as published by
 10 * the Free Software Foundation; either version 2 of the License, or
 11 * (at your option) any later version.
 12 *
 13 * This program is distributed in the hope that it will be useful,
 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 16 * GNU General Public License for more details.
 17 *
 18 * You should have received a copy of the GNU General Public License
 19 * along with this program; if not, write to the Free Software
 20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 21 */
 22
 23#include <linux/linkage.h>
 24#include <linux/init.h>
 
 25#include <asm/assembler.h>
 26#include <asm/hwcap.h>
 27#include <asm/pgtable-hwdef.h>
 28#include <asm/pgtable.h>
 29#include <asm/page.h>
 30#include <asm/ptrace.h>
 31#include "proc-macros.S"
 32
/*
 * This is the maximum size of an area which will be flushed.  If the
 * area is larger than this, then we flush the whole cache.
 */
#define CACHE_DLIMIT	32768

/*
 * The cache line size of the L1 D cache.
 */
#define CACHE_DLINESIZE	32

/*
 * cpu_mohawk_proc_init()
 *
 * Nothing to initialise for this core; return immediately.
 */
ENTRY(cpu_mohawk_proc_init)
	ret	lr

/*
 * cpu_mohawk_proc_fin()
 *
 * Shut the caches down before reboot/halt: read the CP15 control
 * register, clear the enable bits shown in the dot diagrams below,
 * and write it back.
 */
ENTRY(cpu_mohawk_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1800			@ ...iz...........
	bic	r0, r0, #0x0006			@ .............ca.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ret	lr
 59
/*
 * cpu_mohawk_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset (arrives in r0)
 *
 * (same as arm926)
 */
	.align	5
	.pushsection	.idmap.text, "ax"	@ runs with MMU transitioning: must be identity-mapped
ENTRY(cpu_mohawk_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x0007			@ .............cam
	bic	ip, ip, #0x1100			@ ...i...s........
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	ret	r0				@ jump to the reset location
ENDPROC(cpu_mohawk_reset)
	.popsection

/*
 * cpu_mohawk_do_idle()
 *
 * Called with IRQs disabled
 */
	.align	5
ENTRY(cpu_mohawk_do_idle)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mcr	p15, 0, r0, c7, c0, 4		@ wait for interrupt
	ret	lr
 97
/*
 *	flush_icache_all()
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
ENTRY(mohawk_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
ENDPROC(mohawk_flush_icache_all)

/*
 *	flush_user_cache_all()
 *
 *	Clean and invalidate all cache entries in a particular
 *	address space.
 */
ENTRY(mohawk_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(mohawk_flush_kern_cache_all)
	mov	r2, #VM_EXEC			@ kernel entry: always treat as executable
	mov	ip, #0
__flush_whole_cache:
	mcr	p15, 0, ip, c7, c14, 0		@ clean & invalidate all D cache
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcrne	p15, 0, ip, c7, c10, 0		@ drain write buffer
	ret	lr

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Clean and invalidate a range of cache entries in the
 *	specified address range.
 *
 *	- start	- start address (inclusive), in r0
 *	- end	- end address (exclusive), in r1
 *	- flags	- vm_flags describing address space, in r2
 *
 * (same as arm926)
 */
ENTRY(mohawk_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bgt	__flush_whole_cache		@ big range: cheaper to flush everything
1:	tst	r2, #VM_EXEC
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE	@ loop body handles two lines per pass
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr
162
/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(mohawk_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address (r0)
 *	- end	- virtual end address (r1)
 *
 * (same as arm926)
 */
ENTRY(mohawk_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align start down to a cache line
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	r0, #0				@ return 0 (success)
	ret	lr

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- kernel address (r0)
 *	- size	- region size (r1)
 */
ENTRY(mohawk_flush_kern_dcache_area)
	add	r1, r0, r1			@ r1 = end address
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr
218
/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start	- virtual start address (r0)
 *	- end	- virtual end address (r1)
 *
 * (same as v4wb)
 */
mohawk_dma_inv_range:
	tst	r0, #CACHE_DLINESIZE - 1	@ partial line at the start?
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1	@ partial line at the end?
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	dma_clean_range(start, end)
 *
 *	Clean the specified virtual address range.
 *
 *	- start	- virtual start address (r0)
 *	- end	- virtual end address (r1)
 *
 * (same as v4wb)
 */
mohawk_dma_clean_range:
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start	- virtual start address (r0)
 *	- end	- virtual end address (r1)
 */
ENTRY(mohawk_dma_flush_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:
	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr
281
/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address (r0)
 *	- size	- size of region (r1)
 *	- dir	- DMA direction (r2)
 */
ENTRY(mohawk_dma_map_area)
	add	r1, r1, r0			@ convert size to exclusive end address
	cmp	r2, #DMA_TO_DEVICE
	beq	mohawk_dma_clean_range		@ to device: clean (write back) only
	bcs	mohawk_dma_inv_range		@ above TO_DEVICE: invalidate only
	b	mohawk_dma_flush_range		@ otherwise: clean + invalidate
ENDPROC(mohawk_dma_map_area)

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(mohawk_dma_unmap_area)
	ret	lr				@ nothing to do on unmap for this core
ENDPROC(mohawk_dma_unmap_area)

	@ The LoUIS variant is implemented as a full cache flush here
	.globl	mohawk_flush_kern_cache_louis
	.equ	mohawk_flush_kern_cache_louis, mohawk_flush_kern_cache_all

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions mohawk
311
/*
 * cpu_mohawk_dcache_clean_area(addr, size)
 *
 * Clean (write back) a region of the D cache.
 *	- addr	- start address (r0)
 *	- size	- size in bytes (r1)
 */
ENTRY(cpu_mohawk_dcache_clean_area)
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	subs	r1, r1, #CACHE_DLINESIZE
	bhi	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 * cpu_mohawk_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables (r0)
 */
	.align	5
ENTRY(cpu_mohawk_switch_mm)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c14, 0		@ clean & invalidate all D cache
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	orr	r0, r0, #0x18			@ cache the page table in L2
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	ret	lr

/*
 * cpu_mohawk_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 */
	.align	5
ENTRY(cpu_mohawk_set_pte_ext)
#ifdef CONFIG_MMU
	armv3_set_pte_ext
	mov	r0, r0				@ no-op (NOTE(review): carried over from older procs — confirm if still needed)
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr
#endif
352
.globl	cpu_mohawk_suspend_size
.equ	cpu_mohawk_suspend_size, 4 * 6		@ six 32-bit CP registers saved below
#ifdef CONFIG_ARM_CPU_SUSPEND
/*
 * cpu_mohawk_do_suspend(save_area)
 *
 * Save the CP14/CP15 state listed below into the buffer at r0
 * (cpu_mohawk_suspend_size bytes).
 */
ENTRY(cpu_mohawk_do_suspend)
	stmfd	sp!, {r4 - r9, lr}
	mrc	p14, 0, r4, c6, c0, 0	@ clock configuration, for turbo mode
	mrc	p15, 0, r5, c15, c1, 0	@ CP access reg
	mrc	p15, 0, r6, c13, c0, 0	@ PID
	mrc	p15, 0, r7, c3, c0, 0	@ domain ID
	mrc	p15, 0, r8, c1, c0, 1	@ auxiliary control reg
	mrc	p15, 0, r9, c1, c0, 0	@ control reg
	bic	r4, r4, #2		@ clear frequency change bit
	stmia	r0, {r4 - r9}		@ store cp regs
	ldmia	sp!, {r4 - r9, pc}
ENDPROC(cpu_mohawk_do_suspend)

/*
 * cpu_mohawk_do_resume(save_area, pgd_phys)
 *
 * Restore the state saved by cpu_mohawk_do_suspend (r0 = save area,
 * r1 = page table base) and tail-call cpu_resume_mmu with the control
 * register value in r0.
 */
ENTRY(cpu_mohawk_do_resume)
	ldmia	r0, {r4 - r9}		@ load cp regs
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0	@ invalidate I & D caches, BTB
	mcr	p15, 0, ip, c7, c10, 4	@ drain write (&fill) buffer
	mcr	p15, 0, ip, c7, c5, 4	@ flush prefetch buffer
	mcr	p15, 0, ip, c8, c7, 0	@ invalidate I & D TLBs
	mcr	p14, 0, r4, c6, c0, 0	@ clock configuration, turbo mode.
	mcr	p15, 0, r5, c15, c1, 0	@ CP access reg
	mcr	p15, 0, r6, c13, c0, 0	@ PID
	mcr	p15, 0, r7, c3, c0, 0	@ domain ID
	orr	r1, r1, #0x18		@ cache the page table in L2
	mcr	p15, 0, r1, c2, c0, 0	@ translation table base addr
	mcr	p15, 0, r8, c1, c0, 1	@ auxiliary control reg
	mov	r0, r9			@ control register
	b	cpu_resume_mmu
ENDPROC(cpu_mohawk_do_resume)
#endif
387
/*
 * __mohawk_setup()
 *
 * One-time CPU setup run at boot: invalidate caches/TLBs, load the
 * page table pointer and compute the CP15 control register value,
 * which is returned in r0.
 *
 * NOTE(review): r4 is assumed to already hold the page table address
 * on entry (set up by the boot code) — confirm against head.S.
 */
	.type	__mohawk_setup, #function
__mohawk_setup:
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs
	orr	r4, r4, #0x18			@ cache the page table in L2
	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer

	mov	r0, #0				@ don't allow CP access
	mcr	p15, 0, r0, c15, c1, 0		@ write CP access register

	adr	r5, mohawk_crval
	ldmia	r5, {r5, r6}			@ r5 = clear mask, r6 = set mask
	mrc	p15, 0, r0, c1, c0		@ get control register
	bic	r0, r0, r5
	orr	r0, r0, r6
	ret	lr

	.size	__mohawk_setup, . - __mohawk_setup

	/*
	 *  R
	 * .RVI ZFRS BLDP WCAM
	 * .011 1001 ..00 0101
	 *
	 */
	.type	mohawk_crval, #object
mohawk_crval:
	crval	clear=0x00007f3f, mmuset=0x00003905, ucset=0x00001134

	__INITDATA

	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
	define_processor_functions mohawk, dabort=v5t_early_abort, pabort=legacy_pabort
423
424	.section ".rodata"
425
426	string	cpu_arch_name, "armv5te"
427	string	cpu_elf_name, "v5"
428	string	cpu_mohawk_name, "Marvell 88SV331x"
429
430	.align
431
432	.section ".proc.info.init", #alloc
433
434	.type	__88sv331x_proc_info,#object
435__88sv331x_proc_info:
436	.long	0x56158000			@ Marvell 88SV331x (MOHAWK)
437	.long	0xfffff000
438	.long   PMD_TYPE_SECT | \
439		PMD_SECT_BUFFERABLE | \
440		PMD_SECT_CACHEABLE | \
441		PMD_BIT4 | \
442		PMD_SECT_AP_WRITE | \
443		PMD_SECT_AP_READ
444	.long   PMD_TYPE_SECT | \
445		PMD_BIT4 | \
446		PMD_SECT_AP_WRITE | \
447		PMD_SECT_AP_READ
448	initfn	__mohawk_setup, __88sv331x_proc_info
449	.long	cpu_arch_name
450	.long	cpu_elf_name
451	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
452	.long	cpu_mohawk_name
453	.long	mohawk_processor_functions
454	.long	v4wbi_tlb_fns
455	.long	v4wb_user_fns
456	.long	mohawk_cache_fns
457	.size	__88sv331x_proc_info, . - __88sv331x_proc_info
v5.9 (second copy of the same file, as of kernel v5.9)
  1/* SPDX-License-Identifier: GPL-2.0-or-later */
  2/*
  3 *  linux/arch/arm/mm/proc-mohawk.S: MMU functions for Marvell PJ1 core
  4 *
  5 *  PJ1 (codename Mohawk) is a hybrid of the xscale3 and Marvell's own core.
  6 *
  7 *  Heavily based on proc-arm926.S and proc-xsc3.S
 
 
 
 
 
 
 
 
 
 
 
 
 
 
  8 */
  9
 10#include <linux/linkage.h>
 11#include <linux/init.h>
 12#include <linux/pgtable.h>
 13#include <asm/assembler.h>
 14#include <asm/hwcap.h>
 15#include <asm/pgtable-hwdef.h>
 
 16#include <asm/page.h>
 17#include <asm/ptrace.h>
 18#include "proc-macros.S"
 19
/*
 * This is the maximum size of an area which will be flushed.  If the
 * area is larger than this, then we flush the whole cache.
 */
#define CACHE_DLIMIT	32768

/*
 * The cache line size of the L1 D cache.
 */
#define CACHE_DLINESIZE	32

/*
 * cpu_mohawk_proc_init()
 *
 * Nothing to initialise for this core; return immediately.
 */
ENTRY(cpu_mohawk_proc_init)
	ret	lr

/*
 * cpu_mohawk_proc_fin()
 *
 * Shut the caches down before reboot/halt: read the CP15 control
 * register, clear the enable bits shown in the dot diagrams below,
 * and write it back.
 */
ENTRY(cpu_mohawk_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1800			@ ...iz...........
	bic	r0, r0, #0x0006			@ .............ca.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ret	lr
 46
/*
 * cpu_mohawk_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset (arrives in r0)
 *
 * (same as arm926)
 */
	.align	5
	.pushsection	.idmap.text, "ax"	@ runs with MMU transitioning: must be identity-mapped
ENTRY(cpu_mohawk_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x0007			@ .............cam
	bic	ip, ip, #0x1100			@ ...i...s........
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	ret	r0				@ jump to the reset location
ENDPROC(cpu_mohawk_reset)
	.popsection

/*
 * cpu_mohawk_do_idle()
 *
 * Called with IRQs disabled
 */
	.align	5
ENTRY(cpu_mohawk_do_idle)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mcr	p15, 0, r0, c7, c0, 4		@ wait for interrupt
	ret	lr
 84
/*
 *	flush_icache_all()
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
ENTRY(mohawk_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
ENDPROC(mohawk_flush_icache_all)

/*
 *	flush_user_cache_all()
 *
 *	Clean and invalidate all cache entries in a particular
 *	address space.
 */
ENTRY(mohawk_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(mohawk_flush_kern_cache_all)
	mov	r2, #VM_EXEC			@ kernel entry: always treat as executable
	mov	ip, #0
__flush_whole_cache:
	mcr	p15, 0, ip, c7, c14, 0		@ clean & invalidate all D cache
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcrne	p15, 0, ip, c7, c10, 0		@ drain write buffer
	ret	lr

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Clean and invalidate a range of cache entries in the
 *	specified address range.
 *
 *	- start	- start address (inclusive), in r0
 *	- end	- end address (exclusive), in r1
 *	- flags	- vm_flags describing address space, in r2
 *
 * (same as arm926)
 */
ENTRY(mohawk_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bgt	__flush_whole_cache		@ big range: cheaper to flush everything
1:	tst	r2, #VM_EXEC
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE	@ loop body handles two lines per pass
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr
149
/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(mohawk_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address (r0)
 *	- end	- virtual end address (r1)
 *
 * (same as arm926)
 */
ENTRY(mohawk_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align start down to a cache line
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	r0, #0				@ return 0 (success)
	ret	lr

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- kernel address (r0)
 *	- size	- region size (r1)
 */
ENTRY(mohawk_flush_kern_dcache_area)
	add	r1, r0, r1			@ r1 = end address
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr
205
/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start	- virtual start address (r0)
 *	- end	- virtual end address (r1)
 *
 * (same as v4wb)
 */
mohawk_dma_inv_range:
	tst	r0, #CACHE_DLINESIZE - 1	@ partial line at the start?
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1	@ partial line at the end?
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	dma_clean_range(start, end)
 *
 *	Clean the specified virtual address range.
 *
 *	- start	- virtual start address (r0)
 *	- end	- virtual end address (r1)
 *
 * (same as v4wb)
 */
mohawk_dma_clean_range:
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start	- virtual start address (r0)
 *	- end	- virtual end address (r1)
 */
ENTRY(mohawk_dma_flush_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:
	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr
268
/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address (r0)
 *	- size	- size of region (r1)
 *	- dir	- DMA direction (r2)
 */
ENTRY(mohawk_dma_map_area)
	add	r1, r1, r0			@ convert size to exclusive end address
	cmp	r2, #DMA_TO_DEVICE
	beq	mohawk_dma_clean_range		@ to device: clean (write back) only
	bcs	mohawk_dma_inv_range		@ above TO_DEVICE: invalidate only
	b	mohawk_dma_flush_range		@ otherwise: clean + invalidate
ENDPROC(mohawk_dma_map_area)

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(mohawk_dma_unmap_area)
	ret	lr				@ nothing to do on unmap for this core
ENDPROC(mohawk_dma_unmap_area)

	@ The LoUIS variant is implemented as a full cache flush here
	.globl	mohawk_flush_kern_cache_louis
	.equ	mohawk_flush_kern_cache_louis, mohawk_flush_kern_cache_all

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions mohawk
298
/*
 * cpu_mohawk_dcache_clean_area(addr, size)
 *
 * Clean (write back) a region of the D cache.
 *	- addr	- start address (r0)
 *	- size	- size in bytes (r1)
 */
ENTRY(cpu_mohawk_dcache_clean_area)
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	subs	r1, r1, #CACHE_DLINESIZE
	bhi	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 * cpu_mohawk_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables (r0)
 */
	.align	5
ENTRY(cpu_mohawk_switch_mm)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c14, 0		@ clean & invalidate all D cache
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	orr	r0, r0, #0x18			@ cache the page table in L2
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	ret	lr

/*
 * cpu_mohawk_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 */
	.align	5
ENTRY(cpu_mohawk_set_pte_ext)
#ifdef CONFIG_MMU
	armv3_set_pte_ext
	mov	r0, r0				@ no-op (NOTE(review): carried over from older procs — confirm if still needed)
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr
#endif
339
.globl	cpu_mohawk_suspend_size
.equ	cpu_mohawk_suspend_size, 4 * 6		@ six 32-bit CP registers saved below
#ifdef CONFIG_ARM_CPU_SUSPEND
/*
 * cpu_mohawk_do_suspend(save_area)
 *
 * Save the CP14/CP15 state listed below into the buffer at r0
 * (cpu_mohawk_suspend_size bytes).
 */
ENTRY(cpu_mohawk_do_suspend)
	stmfd	sp!, {r4 - r9, lr}
	mrc	p14, 0, r4, c6, c0, 0	@ clock configuration, for turbo mode
	mrc	p15, 0, r5, c15, c1, 0	@ CP access reg
	mrc	p15, 0, r6, c13, c0, 0	@ PID
	mrc	p15, 0, r7, c3, c0, 0	@ domain ID
	mrc	p15, 0, r8, c1, c0, 1	@ auxiliary control reg
	mrc	p15, 0, r9, c1, c0, 0	@ control reg
	bic	r4, r4, #2		@ clear frequency change bit
	stmia	r0, {r4 - r9}		@ store cp regs
	ldmia	sp!, {r4 - r9, pc}
ENDPROC(cpu_mohawk_do_suspend)

/*
 * cpu_mohawk_do_resume(save_area, pgd_phys)
 *
 * Restore the state saved by cpu_mohawk_do_suspend (r0 = save area,
 * r1 = page table base) and tail-call cpu_resume_mmu with the control
 * register value in r0.
 */
ENTRY(cpu_mohawk_do_resume)
	ldmia	r0, {r4 - r9}		@ load cp regs
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0	@ invalidate I & D caches, BTB
	mcr	p15, 0, ip, c7, c10, 4	@ drain write (&fill) buffer
	mcr	p15, 0, ip, c7, c5, 4	@ flush prefetch buffer
	mcr	p15, 0, ip, c8, c7, 0	@ invalidate I & D TLBs
	mcr	p14, 0, r4, c6, c0, 0	@ clock configuration, turbo mode.
	mcr	p15, 0, r5, c15, c1, 0	@ CP access reg
	mcr	p15, 0, r6, c13, c0, 0	@ PID
	mcr	p15, 0, r7, c3, c0, 0	@ domain ID
	orr	r1, r1, #0x18		@ cache the page table in L2
	mcr	p15, 0, r1, c2, c0, 0	@ translation table base addr
	mcr	p15, 0, r8, c1, c0, 1	@ auxiliary control reg
	mov	r0, r9			@ control register
	b	cpu_resume_mmu
ENDPROC(cpu_mohawk_do_resume)
#endif
374
/*
 * __mohawk_setup()
 *
 * One-time CPU setup run at boot: invalidate caches/TLBs, load the
 * page table pointer and compute the CP15 control register value,
 * which is returned in r0.
 *
 * NOTE(review): r4 is assumed to already hold the page table address
 * on entry (set up by the boot code) — confirm against head.S.
 */
	.type	__mohawk_setup, #function
__mohawk_setup:
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs
	orr	r4, r4, #0x18			@ cache the page table in L2
	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer

	mov	r0, #0				@ don't allow CP access
	mcr	p15, 0, r0, c15, c1, 0		@ write CP access register

	adr	r5, mohawk_crval
	ldmia	r5, {r5, r6}			@ r5 = clear mask, r6 = set mask
	mrc	p15, 0, r0, c1, c0		@ get control register
	bic	r0, r0, r5
	orr	r0, r0, r6
	ret	lr

	.size	__mohawk_setup, . - __mohawk_setup

	/*
	 *  R
	 * .RVI ZFRS BLDP WCAM
	 * .011 1001 ..00 0101
	 *
	 */
	.type	mohawk_crval, #object
mohawk_crval:
	crval	clear=0x00007f3f, mmuset=0x00003905, ucset=0x00001134

	__INITDATA

	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
	define_processor_functions mohawk, dabort=v5t_early_abort, pabort=legacy_pabort
410
	.section ".rodata"

	string	cpu_arch_name, "armv5te"
	string	cpu_elf_name, "v5"
	string	cpu_mohawk_name, "Marvell 88SV331x"

	.align

	.section ".proc.info.init", "a"

	/* CPU match record consumed by the boot-time processor lookup. */
	.type	__88sv331x_proc_info,#object
__88sv331x_proc_info:
	.long	0x56158000			@ Marvell 88SV331x (MOHAWK)
	.long	0xfffff000
	.long   PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long   PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	initfn	__mohawk_setup, __88sv331x_proc_info
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_mohawk_name
	.long	mohawk_processor_functions
	.long	v4wbi_tlb_fns
	.long	v4wb_user_fns
	.long	mohawk_cache_fns
	.size	__88sv331x_proc_info, . - __88sv331x_proc_info