/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * linux/arch/arm/lib/memset.S
 *
 * Copyright (C) 1995-2000 Russell King
 *
 * ASM optimised string functions
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/unwind.h>

	.text
	.align	5
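	@ .align 5 pads to a 2^5 = 32-byte boundary, a common ARM
	@ cache line size, so the entry point starts a fresh line.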

ENTRY(__memset)
ENTRY(mmioset)
WEAK(memset)
UNWIND( .fnstart )
	ands	r3, r0, #3		@ 1 unaligned?
	mov	ip, r0			@ preserve r0 as return value
	bne	6f			@ 1
/*
 * we know that the pointer in ip is aligned to a word boundary.
 */
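/*
 * Replicate the fill byte across the whole word: the two orrs
 * below turn e.g. 0x000000AB into 0xABABABAB.
 */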
1:	orr	r1, r1, r1, lsl #8
	orr	r1, r1, r1, lsl #16
	mov	r3, r1
7:	cmp	r2, #16
	blt	4f
UNWIND( .fnend )

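/*
 * "#if ! CALGN(1)+0" is true when CALGN() expands to nothing,
 * which is the common case; asm/assembler.h defines CALGN() to
 * emit its argument on CPUs where cache-line aligning bulk
 * stores pays off (Feroceon at the time of writing), selecting
 * the #else branch instead.
 */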
#if ! CALGN(1)+0

/*
 * We need 2 extra registers for this loop - use r8 and the LR
 */
UNWIND( .fnstart )
UNWIND( .save {r8, lr} )
	stmfd	sp!, {r8, lr}
	mov	r8, r1
	mov	lr, r3

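/*
 * r1 and r3 can differ when we are entered from __memset64
 * (r1 = low word, r3 = high word), so the pattern registers
 * must alternate the two: each stmia below writes the 64-bit
 * pattern twice, i.e. 16 bytes.
 */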
2:	subs	r2, r2, #64
	stmiage	ip!, {r1, r3, r8, lr}	@ 64 bytes at a time.
	stmiage	ip!, {r1, r3, r8, lr}
	stmiage	ip!, {r1, r3, r8, lr}
	stmiage	ip!, {r1, r3, r8, lr}
	bgt	2b
	ldmfdeq	sp!, {r8, pc}		@ Now <64 bytes to go.
/*
 * No need to correct the count: r2 is now negative, but its low
 * six bits still equal the remaining byte count, and we only
 * test individual bits from now on.
 */
	tst	r2, #32
	stmiane	ip!, {r1, r3, r8, lr}
	stmiane	ip!, {r1, r3, r8, lr}
	tst	r2, #16
	stmiane	ip!, {r1, r3, r8, lr}
	ldmfd	sp!, {r8, lr}
UNWIND( .fnend )

#else

/*
 * This version aligns the destination pointer in order to write
 * whole cache lines at once.
 */

UNWIND( .fnstart )
UNWIND( .save {r4-r8, lr} )
	stmfd	sp!, {r4-r8, lr}
	mov	r4, r1
	mov	r5, r3
	mov	r6, r1
	mov	r7, r3
	mov	r8, r1
	mov	lr, r3

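/*
 * Only spend time aligning when more than 96 bytes remain and
 * ip is not already on a 32-byte boundary.
 */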
	cmp	r2, #96
	tstgt	ip, #31
	ble	3f

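/*
 * Store 4-28 bytes to reach the next 32-byte boundary. ip is
 * word aligned, so r8 is a multiple of 4; the lsl #28 moves
 * bit 4 of r8 into C and bit 3 into N, letting the conditional
 * stores below peel off 16, 8 and then 4 bytes.
 */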
	and	r8, ip, #31
	rsb	r8, r8, #32
	sub	r2, r2, r8
	movs	r8, r8, lsl #(32 - 4)
	stmiacs	ip!, {r4, r5, r6, r7}
	stmiami	ip!, {r4, r5}
	tst	r8, #(1 << 30)
	mov	r8, r1
	strne	r1, [ip], #4

3:	subs	r2, r2, #64
	stmiage	ip!, {r1, r3-r8, lr}
	stmiage	ip!, {r1, r3-r8, lr}
	bgt	3b
	ldmfdeq	sp!, {r4-r8, pc}

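/*
 * One eight-register stmia covers 32 bytes; {r4-r7} covers 16.
 */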
	tst	r2, #32
	stmiane	ip!, {r1, r3-r8, lr}
	tst	r2, #16
	stmiane	ip!, {r4-r7}
	ldmfd	sp!, {r4-r8, lr}
UNWIND( .fnend )

#endif

UNWIND( .fnstart )
4:	tst	r2, #8
	stmiane	ip!, {r1, r3}
	tst	r2, #4
	strne	r1, [ip], #4
/*
 * When we get here, we've got less than 4 bytes to set. We
 * may have an unaligned pointer as well.
 */
5:	tst	r2, #2
	strbne	r1, [ip], #1
	strbne	r1, [ip], #1
	tst	r2, #1
	strbne	r1, [ip], #1
	ret	lr

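/*
 * The store ladder below writes 4 - r3 bytes: all three strb's
 * when r3 == 1, the last two when r3 == 2, only the last when
 * r3 == 3.
 */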
6:	subs	r2, r2, #4		@ 1 do we have enough
	blt	5b			@ 1 bytes to align with?
	cmp	r3, #2			@ 1
	strblt	r1, [ip], #1		@ 1
	strble	r1, [ip], #1		@ 1
	strb	r1, [ip], #1		@ 1
	add	r2, r2, r3		@ 1 (r2 = r2 - (4 - r3))
	b	1b
UNWIND( .fnend )
ENDPROC(memset)
ENDPROC(mmioset)
ENDPROC(__memset)

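/*
 * __memset32 and __memset64 reuse the word-aligned body of
 * memset. The 64-bit pattern arrives with its low word in r1
 * and its high word in r3 (byte count in r2, as for memset),
 * so no byte replication is needed and we enter at label 7,
 * past the orrs.
 */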
ENTRY(__memset32)
UNWIND( .fnstart )
	mov	r3, r1			@ copy r1 to r3 and fall into memset64
UNWIND( .fnend )
ENDPROC(__memset32)
ENTRY(__memset64)
UNWIND( .fnstart )
	mov	ip, r0			@ preserve r0 as return value
	b	7b			@ jump into the middle of memset
UNWIND( .fnend )
ENDPROC(__memset64)