/*
 * linux/arch/arm/mm/proc-mohawk.S: MMU functions for Marvell PJ1 core
 *
 * PJ1 (codename Mohawk) is a hybrid of the xscale3 and Marvell's own core.
 *
 * Heavily based on proc-arm926.S and proc-xsc3.S
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
22
23#include <linux/linkage.h>
24#include <linux/init.h>
25#include <asm/assembler.h>
26#include <asm/hwcap.h>
27#include <asm/pgtable-hwdef.h>
28#include <asm/pgtable.h>
29#include <asm/page.h>
30#include <asm/ptrace.h>
31#include "proc-macros.S"
32
/*
 * This is the maximum size of an area which will be flushed.  If the
 * area is larger than this, then we flush the whole cache.
 */
#define CACHE_DLIMIT	32768

/*
 * The cache line size of the L1 D cache.
 */
#define CACHE_DLINESIZE	32

/*
 * cpu_mohawk_proc_init()
 *
 * Nothing to do at processor-init time on this core; return immediately.
 */
ENTRY(cpu_mohawk_proc_init)
	mov	pc, lr

/*
 * cpu_mohawk_proc_fin()
 *
 * Called when the processor is being taken down: clear the I-cache/
 * branch-prediction (i, z) and D-cache/alignment (c, a) enable bits
 * in the CP15 control register.
 */
ENTRY(cpu_mohawk_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1800			@ ...iz...........
	bic	r0, r0, #0x0006			@ .............ca.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	mov	pc, lr
59
/*
 * cpu_mohawk_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 *
 * (same as arm926)
 */
	.align	5
ENTRY(cpu_mohawk_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x0007			@ .............cam (MMU, alignment, D-cache off)
	bic	ip, ip, #0x1100			@ ...i...s........ (I-cache, system-protect off)
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	mov	pc, r0				@ jump to the caller-supplied reset vector

/*
 * cpu_mohawk_do_idle()
 *
 * Called with IRQs disabled
 */
	.align	5
ENTRY(cpu_mohawk_do_idle)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mcr	p15, 0, r0, c7, c0, 4		@ wait for interrupt
	mov	pc, lr
94
/*
 * flush_icache_all()
 *
 * Unconditionally clean and invalidate the entire icache.
 */
ENTRY(mohawk_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mov	pc, lr
ENDPROC(mohawk_flush_icache_all)

/*
 * flush_user_cache_all()
 *
 * Clean and invalidate all cache entries in a particular
 * address space.
 */
ENTRY(mohawk_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
ENTRY(mohawk_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
	@ also entered from mohawk_flush_user_cache_range for over-limit
	@ ranges, with r2 = the caller's vm_flags
	mcr	p15, 0, ip, c7, c14, 0		@ clean & invalidate all D cache
	tst	r2, #VM_EXEC			@ only touch the I side for exec mappings
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcrne	p15, 0, ip, c7, c10, 0		@ drain write buffer
	mov	pc, lr
129
/*
 * flush_user_cache_range(start, end, flags)
 *
 * Clean and invalidate a range of cache entries in the
 * specified address range.
 *
 * - start - start address (inclusive)
 * - end - end address (exclusive)
 * - flags - vm_flags describing address space
 *
 * (same as arm926)
 */
ENTRY(mohawk_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT		@ past the limit it is cheaper to
	bgt	__flush_whole_cache		@ flush the whole cache instead
1:	tst	r2, #VM_EXEC
	@ loop body handles two cache lines per iteration
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	pc, lr
159
/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end - virtual end address
 */
ENTRY(mohawk_coherent_kern_range)
	/* FALLTHROUGH */

/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end - virtual end address
 *
 * Returns 0 (success) in r0.
 *
 * (same as arm926)
 */
ENTRY(mohawk_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	r0, #0				@ report success, not the loop cursor
	mov	pc, lr
194
/*
 * flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache
 *
 * - addr - kernel address
 * - size - region size
 */
ENTRY(mohawk_flush_kern_dcache_area)
	add	r1, r0, r1			@ r1 = end address
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr
214
/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries.  If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 * - start - virtual start address
 * - end - virtual end address
 *
 * (same as v4wb)
 */
mohawk_dma_inv_range:
	@ clean any partial line at either boundary first so adjacent
	@ data sharing those lines is not discarded
	tst	r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * dma_clean_range(start, end)
 *
 * Clean the specified virtual address range.
 *
 * - start - virtual start address
 * - end - virtual end address
 *
 * (same as v4wb)
 */
mohawk_dma_clean_range:
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr
259
/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start - virtual start address
 * - end - virtual end address
 */
ENTRY(mohawk_dma_flush_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:
	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * dma_map_area(start, size, dir)
 * - start - kernel virtual start address
 * - size - size of region
 * - dir - DMA direction
 */
ENTRY(mohawk_dma_map_area)
	add	r1, r1, r0			@ convert (start, size) to (start, end)
	cmp	r2, #DMA_TO_DEVICE
	beq	mohawk_dma_clean_range		@ DMA_TO_DEVICE: clean only
	bcs	mohawk_dma_inv_range		@ dir > DMA_TO_DEVICE (FROM_DEVICE): invalidate
	b	mohawk_dma_flush_range		@ otherwise (BIDIRECTIONAL): clean+invalidate
ENDPROC(mohawk_dma_map_area)

/*
 * dma_unmap_area(start, size, dir)
 * - start - kernel virtual start address
 * - size - size of region
 * - dir - DMA direction
 */
ENTRY(mohawk_dma_unmap_area)
	mov	pc, lr				@ nothing to do at unmap time
ENDPROC(mohawk_dma_unmap_area)

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions mohawk
304
/*
 * cpu_mohawk_dcache_clean_area(addr, size)
 *
 * Clean 'size' bytes of D-cache starting at 'addr', then drain
 * the write buffer.
 */
ENTRY(cpu_mohawk_dcache_clean_area)
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	subs	r1, r1, #CACHE_DLINESIZE
	bhi	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr

/*
 * cpu_mohawk_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
ENTRY(cpu_mohawk_switch_mm)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c14, 0		@ clean & invalidate all D cache
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	orr	r0, r0, #0x18			@ cache the page table in L2
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	mov	pc, lr

/*
 * cpu_mohawk_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 */
	.align	5
ENTRY(cpu_mohawk_set_pte_ext)
	armv3_set_pte_ext
	mov	r0, r0				@ NOP kept from original; presumably lets the PTE store settle — TODO confirm
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr
343
	__CPUINIT

/*
 * __mohawk_setup
 *
 * Early CPU setup with caches/TLBs invalidated.  Presumably invoked
 * from the boot path via the proc_info entry below (TODO confirm);
 * r4 holds the page table pointer that is loaded into TTBR.
 * Returns in r0 the control-register value to be set by the caller.
 */
	.type	__mohawk_setup, #function
__mohawk_setup:
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs
	orr	r4, r4, #0x18			@ cache the page table in L2
	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer

	mov	r0, #0				@ don't allow CP access
	mcr	p15, 0, r0, c15, c1, 0		@ write CP access register

	adr	r5, mohawk_crval
	ldmia	r5, {r5, r6}			@ r5 = bits to clear, r6 = bits to set
	mrc	p15, 0, r0, c1, c0		@ get control register
	bic	r0, r0, r5
	orr	r0, r0, r6
	mov	pc, lr

	.size	__mohawk_setup, . - __mohawk_setup

	/*
	 * Control-register layout being set:
	 * R
	 * .RVI ZFRS BLDP WCAM
	 * .011 1001 ..00 0101
	 *
	 */
	.type	mohawk_crval, #object
mohawk_crval:
	crval	clear=0x00007f3f, mmuset=0x00003905, ucset=0x00001134

	__INITDATA

	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
	define_processor_functions mohawk, dabort=v5t_early_abort, pabort=legacy_pabort
381
	.section ".rodata"

	string	cpu_arch_name, "armv5te"
	string	cpu_elf_name, "v5"
	string	cpu_mohawk_name, "Marvell 88SV331x"

	.align

	@ CPU match table entry (struct proc_info_list); the boot code
	@ scans this section comparing the CPU ID against value/mask.
	.section ".proc.info.init", #alloc, #execinstr

	.type	__88sv331x_proc_info,#object
__88sv331x_proc_info:
	.long	0x56158000			@ Marvell 88SV331x (MOHAWK)
	.long	0xfffff000			@ CPU ID mask
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b	__mohawk_setup			@ initialisation hook
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_mohawk_name
	.long	mohawk_processor_functions
	.long	v4wbi_tlb_fns
	.long	v4wb_user_fns
	.long	mohawk_cache_fns
	.size	__88sv331x_proc_info, . - __88sv331x_proc_info
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * linux/arch/arm/mm/proc-mohawk.S: MMU functions for Marvell PJ1 core
 *
 * PJ1 (codename Mohawk) is a hybrid of the xscale3 and Marvell's own core.
 *
 * Heavily based on proc-arm926.S and proc-xsc3.S
 */
9
10#include <linux/linkage.h>
11#include <linux/init.h>
12#include <linux/cfi_types.h>
13#include <linux/pgtable.h>
14#include <asm/assembler.h>
15#include <asm/hwcap.h>
16#include <asm/pgtable-hwdef.h>
17#include <asm/page.h>
18#include <asm/ptrace.h>
19#include "proc-macros.S"
20
/*
 * This is the maximum size of an area which will be flushed.  If the
 * area is larger than this, then we flush the whole cache.
 */
#define CACHE_DLIMIT	32768

/*
 * The cache line size of the L1 D cache.
 */
#define CACHE_DLINESIZE	32

/*
 * cpu_mohawk_proc_init()
 *
 * Nothing to do at processor-init time on this core.
 */
SYM_TYPED_FUNC_START(cpu_mohawk_proc_init)
	ret	lr
SYM_FUNC_END(cpu_mohawk_proc_init)

/*
 * cpu_mohawk_proc_fin()
 *
 * Called when the processor is being taken down: clear the I-cache/
 * branch-prediction (i, z) and D-cache/alignment (c, a) enable bits
 * in the CP15 control register.
 */
SYM_TYPED_FUNC_START(cpu_mohawk_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1800			@ ...iz...........
	bic	r0, r0, #0x0006			@ .............ca.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ret	lr
SYM_FUNC_END(cpu_mohawk_proc_fin)
49
/*
 * cpu_mohawk_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 *
 * (same as arm926)
 */
	.align	5
	.pushsection	.idmap.text, "ax"	@ runs while the MMU is being turned off
SYM_TYPED_FUNC_START(cpu_mohawk_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x0007			@ .............cam (MMU, alignment, D-cache off)
	bic	ip, ip, #0x1100			@ ...i...s........ (I-cache, system-protect off)
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	ret	r0				@ jump to the caller-supplied reset vector
SYM_FUNC_END(cpu_mohawk_reset)
	.popsection

/*
 * cpu_mohawk_do_idle()
 *
 * Called with IRQs disabled
 */
	.align	5
SYM_TYPED_FUNC_START(cpu_mohawk_do_idle)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mcr	p15, 0, r0, c7, c0, 4		@ wait for interrupt
	ret	lr
SYM_FUNC_END(cpu_mohawk_do_idle)
88
/*
 * flush_icache_all()
 *
 * Unconditionally clean and invalidate the entire icache.
 */
SYM_TYPED_FUNC_START(mohawk_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
SYM_FUNC_END(mohawk_flush_icache_all)

/*
 * flush_user_cache_all()
 *
 * Clean and invalidate all cache entries in a particular
 * address space.
 */
SYM_FUNC_ALIAS(mohawk_flush_user_cache_all, mohawk_flush_kern_cache_all)

/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
SYM_TYPED_FUNC_START(mohawk_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
	@ also entered from mohawk_flush_user_cache_range for over-limit
	@ ranges, with r2 = the caller's vm_flags
	mcr	p15, 0, ip, c7, c14, 0		@ clean & invalidate all D cache
	tst	r2, #VM_EXEC			@ only touch the I side for exec mappings
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcrne	p15, 0, ip, c7, c10, 0		@ drain write buffer
	ret	lr
SYM_FUNC_END(mohawk_flush_kern_cache_all)
123
/*
 * flush_user_cache_range(start, end, flags)
 *
 * Clean and invalidate a range of cache entries in the
 * specified address range.
 *
 * - start - start address (inclusive)
 * - end - end address (exclusive)
 * - flags - vm_flags describing address space
 *
 * (same as arm926)
 */
SYM_TYPED_FUNC_START(mohawk_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT		@ past the limit it is cheaper to
	bgt	__flush_whole_cache		@ flush the whole cache instead
1:	tst	r2, #VM_EXEC
	@ loop body handles two cache lines per iteration
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr
SYM_FUNC_END(mohawk_flush_user_cache_range)
154
/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end - virtual end address
 */
SYM_TYPED_FUNC_START(mohawk_coherent_kern_range)
#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
	@ with CFI every indirect-callable prototype needs its own typed
	@ entry, so branch explicitly instead of falling through
	b	mohawk_coherent_user_range
#endif
SYM_FUNC_END(mohawk_coherent_kern_range)

/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end - virtual end address
 *
 * Returns 0 (success) in r0.
 *
 * (same as arm926)
 */
SYM_TYPED_FUNC_START(mohawk_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	r0, #0				@ success
	ret	lr
SYM_FUNC_END(mohawk_coherent_user_range)
194
/*
 * flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache
 *
 * - addr - kernel address
 * - size - region size
 */
SYM_TYPED_FUNC_START(mohawk_flush_kern_dcache_area)
	add	r1, r0, r1			@ r1 = end address
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr
SYM_FUNC_END(mohawk_flush_kern_dcache_area)
215
/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries.  If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 * - start - virtual start address
 * - end - virtual end address
 *
 * (same as v4wb)
 */
mohawk_dma_inv_range:
	@ clean any partial line at either boundary first so adjacent
	@ data sharing those lines is not discarded
	tst	r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 * dma_clean_range(start, end)
 *
 * Clean the specified virtual address range.
 *
 * - start - virtual start address
 * - end - virtual end address
 *
 * (same as v4wb)
 */
mohawk_dma_clean_range:
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr
260
/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start - virtual start address
 * - end - virtual end address
 */
SYM_TYPED_FUNC_START(mohawk_dma_flush_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:
	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr
SYM_FUNC_END(mohawk_dma_flush_range)

/*
 * dma_map_area(start, size, dir)
 * - start - kernel virtual start address
 * - size - size of region
 * - dir - DMA direction
 */
SYM_TYPED_FUNC_START(mohawk_dma_map_area)
	add	r1, r1, r0			@ convert (start, size) to (start, end)
	cmp	r2, #DMA_TO_DEVICE
	beq	mohawk_dma_clean_range		@ DMA_TO_DEVICE: clean only
	bcs	mohawk_dma_inv_range		@ dir > DMA_TO_DEVICE (FROM_DEVICE): invalidate
	b	mohawk_dma_flush_range		@ otherwise (BIDIRECTIONAL): clean+invalidate
SYM_FUNC_END(mohawk_dma_map_area)

/*
 * dma_unmap_area(start, size, dir)
 * - start - kernel virtual start address
 * - size - size of region
 * - dir - DMA direction
 */
SYM_TYPED_FUNC_START(mohawk_dma_unmap_area)
	ret	lr				@ nothing to do at unmap time
SYM_FUNC_END(mohawk_dma_unmap_area)
303
/*
 * cpu_mohawk_dcache_clean_area(addr, size)
 *
 * Clean 'size' bytes of D-cache starting at 'addr', then drain
 * the write buffer.
 */
SYM_TYPED_FUNC_START(cpu_mohawk_dcache_clean_area)
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	subs	r1, r1, #CACHE_DLINESIZE
	bhi	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr
SYM_FUNC_END(cpu_mohawk_dcache_clean_area)

/*
 * cpu_mohawk_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
SYM_TYPED_FUNC_START(cpu_mohawk_switch_mm)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c14, 0		@ clean & invalidate all D cache
	mcr	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	orr	r0, r0, #0x18			@ cache the page table in L2
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	ret	lr
SYM_FUNC_END(cpu_mohawk_switch_mm)
331
/*
 * cpu_mohawk_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 */
	.align	5
SYM_TYPED_FUNC_START(cpu_mohawk_set_pte_ext)
#ifdef CONFIG_MMU
	armv3_set_pte_ext
	mov	r0, r0				@ NOP kept from original; presumably lets the PTE store settle — TODO confirm
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
#endif
	ret	lr				@ return even when !CONFIG_MMU compiles the body out
SYM_FUNC_END(cpu_mohawk_set_pte_ext)
347
.globl	cpu_mohawk_suspend_size
.equ	cpu_mohawk_suspend_size, 4 * 6		@ six 32-bit registers saved below
#ifdef CONFIG_ARM_CPU_SUSPEND
/*
 * cpu_mohawk_do_suspend(save_area)
 *
 * r0 = save area; store the six CP14/CP15 registers that must
 * survive a suspend cycle (clock config, CP access, PID, domain,
 * aux control, control).
 */
SYM_TYPED_FUNC_START(cpu_mohawk_do_suspend)
	stmfd	sp!, {r4 - r9, lr}
	mrc	p14, 0, r4, c6, c0, 0	@ clock configuration, for turbo mode
	mrc	p15, 0, r5, c15, c1, 0	@ CP access reg
	mrc	p15, 0, r6, c13, c0, 0	@ PID
	mrc	p15, 0, r7, c3, c0, 0	@ domain ID
	mrc	p15, 0, r8, c1, c0, 1	@ auxiliary control reg
	mrc	p15, 0, r9, c1, c0, 0	@ control reg
	bic	r4, r4, #2		@ clear frequency change bit
	stmia	r0, {r4 - r9}		@ store cp regs
	ldmia	sp!, {r4 - r9, pc}
SYM_FUNC_END(cpu_mohawk_do_suspend)

/*
 * cpu_mohawk_do_resume(save_area, pgd_phys)
 *
 * r0 = save area written by do_suspend, r1 = page table pointer.
 * Restore the saved registers, reload TTBR and tail-call
 * cpu_resume_mmu with the control-register value in r0.
 */
SYM_TYPED_FUNC_START(cpu_mohawk_do_resume)
	ldmia	r0, {r4 - r9}		@ load cp regs
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0	@ invalidate I & D caches, BTB
	mcr	p15, 0, ip, c7, c10, 4	@ drain write (&fill) buffer
	mcr	p15, 0, ip, c7, c5, 4	@ flush prefetch buffer
	mcr	p15, 0, ip, c8, c7, 0	@ invalidate I & D TLBs
	mcr	p14, 0, r4, c6, c0, 0	@ clock configuration, turbo mode.
	mcr	p15, 0, r5, c15, c1, 0	@ CP access reg
	mcr	p15, 0, r6, c13, c0, 0	@ PID
	mcr	p15, 0, r7, c3, c0, 0	@ domain ID
	orr	r1, r1, #0x18		@ cache the page table in L2
	mcr	p15, 0, r1, c2, c0, 0	@ translation table base addr
	mcr	p15, 0, r8, c1, c0, 1	@ auxiliary control reg
	mov	r0, r9			@ control register
	b	cpu_resume_mmu
SYM_FUNC_END(cpu_mohawk_do_resume)
#endif
382
/*
 * __mohawk_setup
 *
 * Early CPU setup with caches/TLBs invalidated.  Presumably invoked
 * from the boot path via the proc_info initfn below (TODO confirm);
 * r4 holds the page table pointer that is loaded into TTBR.
 * Returns in r0 the control-register value to be set by the caller.
 */
	.type	__mohawk_setup, #function
__mohawk_setup:
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer
	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs
	orr	r4, r4, #0x18			@ cache the page table in L2
	mcr	p15, 0, r4, c2, c0, 0		@ load page table pointer

	mov	r0, #0				@ don't allow CP access
	mcr	p15, 0, r0, c15, c1, 0		@ write CP access register

	adr	r5, mohawk_crval
	ldmia	r5, {r5, r6}			@ r5 = bits to clear, r6 = bits to set
	mrc	p15, 0, r0, c1, c0		@ get control register
	bic	r0, r0, r5
	orr	r0, r0, r6
	ret	lr

	.size	__mohawk_setup, . - __mohawk_setup

	/*
	 * Control-register layout being set:
	 * R
	 * .RVI ZFRS BLDP WCAM
	 * .011 1001 ..00 0101
	 *
	 */
	.type	mohawk_crval, #object
mohawk_crval:
	crval	clear=0x00007f3f, mmuset=0x00003905, ucset=0x00001134

	__INITDATA

	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
	define_processor_functions mohawk, dabort=v5t_early_abort, pabort=legacy_pabort
418
	.section ".rodata"

	string	cpu_arch_name, "armv5te"
	string	cpu_elf_name, "v5"
	string	cpu_mohawk_name, "Marvell 88SV331x"

	.align

	@ CPU match table entry (struct proc_info_list); the boot code
	@ scans this section comparing the CPU ID against value/mask.
	.section ".proc.info.init", "a"

	.type	__88sv331x_proc_info,#object
__88sv331x_proc_info:
	.long	0x56158000			@ Marvell 88SV331x (MOHAWK)
	.long	0xfffff000			@ CPU ID mask
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	initfn	__mohawk_setup, __88sv331x_proc_info	@ initialisation hook (PC-relative offset)
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	cpu_mohawk_name
	.long	mohawk_processor_functions
	.long	v4wbi_tlb_fns
	.long	v4wb_user_fns
	.long	mohawk_cache_fns
	.size	__88sv331x_proc_info, . - __88sv331x_proc_info