/*
 * Cache flushing routines.
 *
 * Copyright (C) 1999-2001, 2005 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 05/28/05 Zoltan Menyhart	Dynamic stride size
 */

#include <asm/asmmacro.h>
#include <asm/export.h>

	/*
	 * flush_icache_range(start,end)
	 *
	 * Make i-cache(s) coherent with d-caches.
	 *
	 * Must deal with range from start to end-1 but nothing else (need to
	 * be careful not to touch addresses that may be unmapped).
	 *
	 * Note: "in0" and "in1" are preserved for debugging purposes.
	 */
24 .section .kprobes.text,"ax"
25GLOBAL_ENTRY(flush_icache_range)
26
27 .prologue
28 alloc r2=ar.pfs,2,0,0,0
29 movl r3=ia64_i_cache_stride_shift
30 mov r21=1
31 ;;
32 ld8 r20=[r3] // r20: stride shift
33 sub r22=in1,r0,1 // last byte address
34 ;;
35 shr.u r23=in0,r20 // start / (stride size)
36 shr.u r22=r22,r20 // (last byte address) / (stride size)
37 shl r21=r21,r20 // r21: stride size of the i-cache(s)
38 ;;
39 sub r8=r22,r23 // number of strides - 1
40 shl r24=r23,r20 // r24: addresses for "fc.i" =
41 // "start" rounded down to stride boundary
42 .save ar.lc,r3
43 mov r3=ar.lc // save ar.lc
44 ;;
45
46 .body
47 mov ar.lc=r8
48 ;;
49 /*
50 * 32 byte aligned loop, even number of (actually 2) bundles
51 */
52.Loop: fc.i r24 // issuable on M0 only
53 add r24=r21,r24 // we flush "stride size" bytes per iteration
54 nop.i 0
55 br.cloop.sptk.few .Loop
56 ;;
57 sync.i
58 ;;
59 srlz.i
60 ;;
61 mov ar.lc=r3 // restore ar.lc
62 br.ret.sptk.many rp
63END(flush_icache_range)
64EXPORT_SYMBOL_GPL(flush_icache_range)

	/*
	 * clflush_cache_range(start,size)
	 *
	 * Flush cache lines from start to start+size-1.
	 *
	 * Must deal with range from start to start+size-1 but nothing else
	 * (need to be careful not to touch addresses that may be
	 * unmapped).
	 *
	 * Note: "in0" and "in1" are preserved for debugging purposes.
	 */
77 .section .kprobes.text,"ax"
78GLOBAL_ENTRY(clflush_cache_range)
79
80 .prologue
81 alloc r2=ar.pfs,2,0,0,0
82 movl r3=ia64_cache_stride_shift
83 mov r21=1
84 add r22=in1,in0
85 ;;
86 ld8 r20=[r3] // r20: stride shift
87 sub r22=r22,r0,1 // last byte address
88 ;;
89 shr.u r23=in0,r20 // start / (stride size)
90 shr.u r22=r22,r20 // (last byte address) / (stride size)
91 shl r21=r21,r20 // r21: stride size of the i-cache(s)
92 ;;
93 sub r8=r22,r23 // number of strides - 1
94 shl r24=r23,r20 // r24: addresses for "fc" =
95 // "start" rounded down to stride
96 // boundary
97 .save ar.lc,r3
98 mov r3=ar.lc // save ar.lc
99 ;;
100
101 .body
102 mov ar.lc=r8
103 ;;
104 /*
105 * 32 byte aligned loop, even number of (actually 2) bundles
106 */
107.Loop_fc:
108 fc r24 // issuable on M0 only
109 add r24=r21,r24 // we flush "stride size" bytes per iteration
110 nop.i 0
111 br.cloop.sptk.few .Loop_fc
112 ;;
113 sync.i
114 ;;
115 srlz.i
116 ;;
117 mov ar.lc=r3 // restore ar.lc
118 br.ret.sptk.many rp
119END(clflush_cache_range)
/*
 * Cache flushing routines.
 *
 * Copyright (C) 1999-2001, 2005 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *
 * 05/28/05 Zoltan Menyhart	Dynamic stride size
 */

#include <asm/asmmacro.h>

	/*
	 * flush_icache_range(start,end)
	 *
	 * Make i-cache(s) coherent with d-caches.
	 *
	 * Must deal with range from start to end-1 but nothing else (need to
	 * be careful not to touch addresses that may be unmapped).
	 *
	 * Note: "in0" and "in1" are preserved for debugging purposes.
	 */
23 .section .kprobes.text,"ax"
24GLOBAL_ENTRY(flush_icache_range)
25
26 .prologue
27 alloc r2=ar.pfs,2,0,0,0
28 movl r3=ia64_i_cache_stride_shift
29 mov r21=1
30 ;;
31 ld8 r20=[r3] // r20: stride shift
32 sub r22=in1,r0,1 // last byte address
33 ;;
34 shr.u r23=in0,r20 // start / (stride size)
35 shr.u r22=r22,r20 // (last byte address) / (stride size)
36 shl r21=r21,r20 // r21: stride size of the i-cache(s)
37 ;;
38 sub r8=r22,r23 // number of strides - 1
39 shl r24=r23,r20 // r24: addresses for "fc.i" =
40 // "start" rounded down to stride boundary
41 .save ar.lc,r3
42 mov r3=ar.lc // save ar.lc
43 ;;
44
45 .body
46 mov ar.lc=r8
47 ;;
48 /*
49 * 32 byte aligned loop, even number of (actually 2) bundles
50 */
51.Loop: fc.i r24 // issuable on M0 only
52 add r24=r21,r24 // we flush "stride size" bytes per iteration
53 nop.i 0
54 br.cloop.sptk.few .Loop
55 ;;
56 sync.i
57 ;;
58 srlz.i
59 ;;
60 mov ar.lc=r3 // restore ar.lc
61 br.ret.sptk.many rp
62END(flush_icache_range)

	/*
	 * clflush_cache_range(start,size)
	 *
	 * Flush cache lines from start to start+size-1.
	 *
	 * Must deal with range from start to start+size-1 but nothing else
	 * (need to be careful not to touch addresses that may be
	 * unmapped).
	 *
	 * Note: "in0" and "in1" are preserved for debugging purposes.
	 */
75 .section .kprobes.text,"ax"
76GLOBAL_ENTRY(clflush_cache_range)
77
78 .prologue
79 alloc r2=ar.pfs,2,0,0,0
80 movl r3=ia64_cache_stride_shift
81 mov r21=1
82 add r22=in1,in0
83 ;;
84 ld8 r20=[r3] // r20: stride shift
85 sub r22=r22,r0,1 // last byte address
86 ;;
87 shr.u r23=in0,r20 // start / (stride size)
88 shr.u r22=r22,r20 // (last byte address) / (stride size)
89 shl r21=r21,r20 // r21: stride size of the i-cache(s)
90 ;;
91 sub r8=r22,r23 // number of strides - 1
92 shl r24=r23,r20 // r24: addresses for "fc" =
93 // "start" rounded down to stride
94 // boundary
95 .save ar.lc,r3
96 mov r3=ar.lc // save ar.lc
97 ;;
98
99 .body
100 mov ar.lc=r8
101 ;;
102 /*
103 * 32 byte aligned loop, even number of (actually 2) bundles
104 */
105.Loop_fc:
106 fc r24 // issuable on M0 only
107 add r24=r21,r24 // we flush "stride size" bytes per iteration
108 nop.i 0
109 br.cloop.sptk.few .Loop_fc
110 ;;
111 sync.i
112 ;;
113 srlz.i
114 ;;
115 mov ar.lc=r3 // restore ar.lc
116 br.ret.sptk.many rp
117END(clflush_cache_range)