/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * linux/arch/arm/mm/proc-arm1022.S: MMU functions for ARM1022E
 *
 * Copyright (C) 2000 ARM Limited
 * Copyright (C) 2000 Deep Blue Solutions Ltd.
 * hacked for non-paged-MM by Hyok S. Choi, 2003.
 *
 * This is the low-level assembler for performing cache and TLB
 * functions on the ARM1022E.
 */
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/cfi_types.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>

#include "proc-macros.S"

/*
 * This is the maximum size of an area which will be invalidated
 * using the single invalidate entry instructions.  Anything larger
 * than this, and we go for the whole cache.
 *
 * This value should be chosen such that we choose the cheapest
 * alternative.
 */
#define MAX_AREA_SIZE	32768

/*
 * The size of one data cache line.
 */
#define CACHE_DLINESIZE	32

/*
 * The number of data cache segments.
 */
#define CACHE_DSEGMENTS	16

/*
 * The number of lines in a cache segment.
 */
#define CACHE_DENTRIES	64

/*
 * This is the size at which it becomes more efficient to
 * clean the whole cache, rather than using the individual
 * cache line maintenance instructions.
 */
#define CACHE_DLIMIT	32768

	.text
/*
 * cpu_arm1022_proc_init()
 */
SYM_TYPED_FUNC_START(cpu_arm1022_proc_init)
	ret	lr
SYM_FUNC_END(cpu_arm1022_proc_init)

/*
 * cpu_arm1022_proc_fin()
 */
SYM_TYPED_FUNC_START(cpu_arm1022_proc_fin)
	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1000			@ ...i............
	bic	r0, r0, #0x000e			@ ............wca.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ret	lr
SYM_FUNC_END(cpu_arm1022_proc_fin)

/*
 * cpu_arm1022_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 */
	.align	5
	.pushsection	.idmap.text, "ax"
SYM_TYPED_FUNC_START(cpu_arm1022_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
#ifdef CONFIG_MMU
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
#endif
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x000f			@ ............wcam
	bic	ip, ip, #0x1100			@ ...i...s........
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	ret	r0
SYM_FUNC_END(cpu_arm1022_reset)
	.popsection

/*
 * cpu_arm1022_do_idle()
 */
	.align	5
SYM_TYPED_FUNC_START(cpu_arm1022_do_idle)
	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
	ret	lr
SYM_FUNC_END(cpu_arm1022_do_idle)

/* ================================= CACHE ================================ */

	.align	5

/*
 * flush_icache_all()
 *
 * Unconditionally clean and invalidate the entire icache.
 */
SYM_TYPED_FUNC_START(arm1022_flush_icache_all)
#ifndef CONFIG_CPU_ICACHE_DISABLE
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
#endif
	ret	lr
SYM_FUNC_END(arm1022_flush_icache_all)

/*
 * flush_user_cache_all()
 *
 * Invalidate all cache entries in a particular address
 * space.
 */
SYM_FUNC_ALIAS(arm1022_flush_user_cache_all, arm1022_flush_kern_cache_all)

/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
SYM_TYPED_FUNC_START(arm1022_flush_kern_cache_all)
	mov	r2, #VM_EXEC
	mov	ip, #0
__flush_whole_cache:
#ifndef CONFIG_CPU_DCACHE_DISABLE
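	@ walk the D cache by set/way: in the value written to c7, c14, 2
	@ below, bits [5..] select the segment and bits [26..] the line index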
	mov	r1, #(CACHE_DSEGMENTS - 1) << 5		@ 16 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26	@ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2			@ clean+invalidate D index
	subs	r3, r3, #1 << 26
	bcs	2b					@ entries 63 to 0
	subs	r1, r1, #1 << 5
	bcs	1b					@ segments 15 to 0
#endif
	tst	r2, #VM_EXEC
#ifndef CONFIG_CPU_ICACHE_DISABLE
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
#endif
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr
SYM_FUNC_END(arm1022_flush_kern_cache_all)

/*
 * flush_user_cache_range(start, end, flags)
 *
 * Invalidate a range of cache entries in the specified
 * address space.
 *
 * - start - start address (inclusive)
 * - end - end address (exclusive)
 * - flags - vm_flags for this space
 */
SYM_TYPED_FUNC_START(arm1022_flush_user_cache_range)
	mov	ip, #0
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bhs	__flush_whole_cache

#ifndef CONFIG_CPU_DCACHE_DISABLE
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	tst	r2, #VM_EXEC
#ifndef CONFIG_CPU_ICACHE_DISABLE
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
#endif
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr
SYM_FUNC_END(arm1022_flush_user_cache_range)

/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end - virtual end address
 */
SYM_TYPED_FUNC_START(arm1022_coherent_kern_range)
#ifdef CONFIG_CFI_CLANG /* Fallthrough if !CFI */
	b	arm1022_coherent_user_range
#endif
SYM_FUNC_END(arm1022_coherent_kern_range)

/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start.  If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end - virtual end address
 */
SYM_TYPED_FUNC_START(arm1022_coherent_user_range)
	mov	ip, #0
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:
#ifndef CONFIG_CPU_DCACHE_DISABLE
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
#endif
#ifndef CONFIG_CPU_ICACHE_DISABLE
	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
#endif
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	mov	r0, #0
	ret	lr
SYM_FUNC_END(arm1022_coherent_user_range)

/*
 * flush_kern_dcache_area(void *addr, size_t size)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache
 *
 * - addr - kernel address
 * - size - region size
 */
SYM_TYPED_FUNC_START(arm1022_flush_kern_dcache_area)
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
	add	r1, r0, r1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr
SYM_FUNC_END(arm1022_flush_kern_dcache_area)

/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries.  If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 * - start - virtual start address
 * - end - virtual end address
 *
 * (same as v4wb)
 */
arm1022_dma_inv_range:
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
	tst	r0, #CACHE_DLINESIZE - 1
	bic	r0, r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr

/*
 * dma_clean_range(start, end)
 *
 * Clean the specified virtual address range.
 *
 * - start - virtual start address
 * - end - virtual end address
 *
 * (same as v4wb)
 */
arm1022_dma_clean_range:
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr

/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start - virtual start address
 * - end - virtual end address
 */
SYM_TYPED_FUNC_START(arm1022_dma_flush_range)
	mov	ip, #0
#ifndef CONFIG_CPU_DCACHE_DISABLE
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
#endif
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr
SYM_FUNC_END(arm1022_dma_flush_range)

/*
 * dma_map_area(start, size, dir)
 * - start - kernel virtual start address
 * - size - size of region
 * - dir - DMA direction
 */
SYM_TYPED_FUNC_START(arm1022_dma_map_area)
	add	r1, r1, r0
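	@ dispatch on the DMA direction in r2: DMA_TO_DEVICE needs a clean
	@ only, DMA_FROM_DEVICE an invalidate, and DMA_BIDIRECTIONAL the
	@ full clean+invalidate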
	cmp	r2, #DMA_TO_DEVICE
	beq	arm1022_dma_clean_range
	bcs	arm1022_dma_inv_range
	b	arm1022_dma_flush_range
SYM_FUNC_END(arm1022_dma_map_area)

/*
 * dma_unmap_area(start, size, dir)
 * - start - kernel virtual start address
 * - size - size of region
 * - dir - DMA direction
 */
SYM_TYPED_FUNC_START(arm1022_dma_unmap_area)
	ret	lr
SYM_FUNC_END(arm1022_dma_unmap_area)

	.align	5
SYM_TYPED_FUNC_START(cpu_arm1022_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_DISABLE
	mov	ip, #0
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	subs	r1, r1, #CACHE_DLINESIZE
	bhi	1b
#endif
	ret	lr
SYM_FUNC_END(cpu_arm1022_dcache_clean_area)

/* =============================== PageTable ============================== */

/*
 * cpu_arm1022_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
SYM_TYPED_FUNC_START(cpu_arm1022_switch_mm)
#ifdef CONFIG_MMU
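	@ the ARM1022 caches are virtually addressed, so the whole D cache
	@ is cleaned+invalidated (and the I cache invalidated) before the
	@ translation table base is changed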
#ifndef CONFIG_CPU_DCACHE_DISABLE
	mov	r1, #(CACHE_DSEGMENTS - 1) << 5		@ 16 segments
1:	orr	r3, r1, #(CACHE_DENTRIES - 1) << 26	@ 64 entries
2:	mcr	p15, 0, r3, c7, c14, 2			@ clean+invalidate D index
	subs	r3, r3, #1 << 26
	bcs	2b					@ entries 63 to 0
	subs	r1, r1, #1 << 5
	bcs	1b					@ segments 15 to 0
#endif
	mov	r1, #0
#ifndef CONFIG_CPU_ICACHE_DISABLE
	mcr	p15, 0, r1, c7, c5, 0		@ invalidate I cache
#endif
	mcr	p15, 0, r1, c7, c10, 4		@ drain WB
	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, r1, c8, c7, 0		@ invalidate I & D TLBs
#endif
	ret	lr
SYM_FUNC_END(cpu_arm1022_switch_mm)

/*
 * cpu_arm1022_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 */
	.align	5
SYM_TYPED_FUNC_START(cpu_arm1022_set_pte_ext)
#ifdef CONFIG_MMU
	armv3_set_pte_ext
	mov	r0, r0
#ifndef CONFIG_CPU_DCACHE_DISABLE
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
#endif
#endif /* CONFIG_MMU */
	ret	lr
SYM_FUNC_END(cpu_arm1022_set_pte_ext)

	.type	__arm1022_setup, #function
__arm1022_setup:
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches on v4
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer on v4
#ifdef CONFIG_MMU
	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs on v4
#endif
	adr	r5, arm1022_crval
	ldmia	r5, {r5, r6}
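	@ r5 now holds the control register bits to clear and r6 the bits
	@ to set, as laid out by the crval macro below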
	mrc	p15, 0, r0, c1, c0		@ get control register v4
	bic	r0, r0, r5
	orr	r0, r0, r6
#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
	orr	r0, r0, #0x4000			@ .R..............
#endif
	ret	lr
	.size	__arm1022_setup, . - __arm1022_setup

	/*
	 *  R
	 * .RVI ZFRS BLDP WCAM
	 * .011 1001 ..11 0101
	 *
	 */
	.type	arm1022_crval, #object
arm1022_crval:
	crval	clear=0x00007f3f, mmuset=0x00003935, ucset=0x00001930

	__INITDATA
	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
	define_processor_functions arm1022, dabort=v4t_early_abort, pabort=legacy_pabort

	.section ".rodata"

	string	cpu_arch_name, "armv5te"
	string	cpu_elf_name, "v5"
	string	cpu_arm1022_name, "ARM1022"

	.align

	.section ".proc.info.init", "a"

	.type	__arm1022_proc_info,#object
__arm1022_proc_info:
	.long	0x4105a220			@ ARM 1022E (v5TE)
	.long	0xff0ffff0
	.long	PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	initfn	__arm1022_setup, __arm1022_proc_info
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB | HWCAP_EDSP
	.long	cpu_arm1022_name
	.long	arm1022_processor_functions
	.long	v4wbi_tlb_fns
	.long	v4wb_user_fns
	.long	arm1022_cache_fns
	.size	__arm1022_proc_info, . - __arm1022_proc_info