/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright 2002 Andi Kleen, SuSE Labs */

#include <linux/linkage.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>

.weak memset
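/* the weak binding lets an instrumented memset (e.g. KASAN's) override this one */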

/*
 * ISO C memset - set a memory block to a byte value. This function uses fast
 * string instructions to get better performance than the original function.
 * The code is simpler and shorter than the original function as well.
 *
 * rdi   destination
 * rsi   value (char)
 * rdx   count (bytes)
 *
 * rax   original destination
 */
ENTRY(memset)
ENTRY(__memset)
	/*
	 * Some CPUs support the enhanced REP MOVSB/STOSB (ERMS) feature, which
	 * is preferred when available. If ERMS is not available but REP is
	 * fast (REP_GOOD), use the fast string instructions below.
	 *
	 * Otherwise, fall back to the original memset function.
	 */
	ALTERNATIVE_2 "jmp memset_orig", "", X86_FEATURE_REP_GOOD, \
		      "jmp memset_erms", X86_FEATURE_ERMS

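	/*
	 * Fast string path: store count/8 qwords with 'rep stosq', then the
	 * remaining count%8 bytes with 'rep stosb'. %r9 preserves the original
	 * destination so it can be returned in %rax.
	 */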
	movq %rdi,%r9
	movq %rdx,%rcx
	andl $7,%edx
	shrq $3,%rcx
	/* expand byte value */
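	/* multiplying the zero-extended byte by 0x0101010101010101 copies it
	   into every byte of %rax, e.g. 0xab -> 0xabababababababab */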
	movzbl %sil,%esi
	movabs $0x0101010101010101,%rax
	imulq %rsi,%rax
	rep stosq
	movl %edx,%ecx
	rep stosb
	movq %r9,%rax
	ret
ENDPROC(memset)
ENDPROC(__memset)

/*
 * ISO C memset - set a memory block to a byte value. This function uses
 * enhanced rep stosb to override the fast string function.
 * The code is simpler and shorter than the fast string function as well.
 *
 * rdi   destination
 * rsi   value (char)
 * rdx   count (bytes)
 *
 * rax   original destination
 */
ENTRY(memset_erms)
	movq %rdi,%r9
	movb %sil,%al
	movq %rdx,%rcx
	rep stosb
	movq %r9,%rax
	ret
ENDPROC(memset_erms)

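/*
 * memset_orig - fallback for CPUs where REP STOS is not fast (neither
 * X86_FEATURE_REP_GOOD nor X86_FEATURE_ERMS): expand the byte into a qword
 * and store it with an unrolled 64-byte loop.
 */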
ENTRY(memset_orig)
	movq %rdi,%r10

	/* expand byte value */
	movzbl %sil,%ecx
	movabs $0x0101010101010101,%rax
	imulq %rcx,%rax

	/* align dst */
	movl %edi,%r9d
	andl $7,%r9d
	jnz .Lbad_alignment
.Lafter_bad_alignment:

	movq %rdx,%rcx
	shrq $6,%rcx
	jz .Lhandle_tail

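	/* main loop: eight qword stores (64 bytes) per iteration, %rcx = count/64 */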
	.p2align 4
.Lloop_64:
	decq %rcx
	movq %rax,(%rdi)
	movq %rax,8(%rdi)
	movq %rax,16(%rdi)
	movq %rax,24(%rdi)
	movq %rax,32(%rdi)
	movq %rax,40(%rdi)
	movq %rax,48(%rdi)
	movq %rax,56(%rdi)
	leaq 64(%rdi),%rdi
	jnz .Lloop_64

	/* Handle the tail in loops. The loops should be faster than
	   hard-to-predict jump tables. */
	.p2align 4
.Lhandle_tail:
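	/* %edx still holds the remaining byte count; 63 & ~7 == 56 keeps the
	   whole qwords of the final 0..63 bytes, .Lhandle_7 takes the rest */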
	movl %edx,%ecx
	andl $63&(~7),%ecx
	jz .Lhandle_7
	shrl $3,%ecx
	.p2align 4
.Lloop_8:
	decl %ecx
	movq %rax,(%rdi)
	leaq 8(%rdi),%rdi
	jnz .Lloop_8

.Lhandle_7:
	andl $7,%edx
	jz .Lende
	.p2align 4
.Lloop_1:
	decl %edx
	movb %al,(%rdi)
	leaq 1(%rdi),%rdi
	jnz .Lloop_1

.Lende:
	movq %r10,%rax
	ret

.Lbad_alignment:
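	/* destination is not 8-byte aligned (%r9d holds the misalignment): if
	   more than 7 bytes remain, do one unaligned qword store and advance
	   %rdi by 8-%r9 so the main loop runs aligned */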
	cmpq $7,%rdx
	jbe .Lhandle_7
	movq %rax,(%rdi)	/* unaligned store */
	movq $8,%r8
	subq %r9,%r8
	addq %r8,%rdi
	subq %r8,%rdx
	jmp .Lafter_bad_alignment
.Lfinal:
ENDPROC(memset_orig)
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright 2002 Andi Kleen, SuSE Labs */

#include <linux/export.h>
#include <linux/linkage.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h>

.section .noinstr.text, "ax"

/*
 * ISO C memset - set a memory block to a byte value. This function uses fast
 * string instructions to get better performance than the original function.
 * The code is simpler and shorter than the original function as well.
 *
 * rdi   destination
 * rsi   value (char)
 * rdx   count (bytes)
 *
 * rax   original destination
 *
 * The FSRS alternative should be done inline (avoiding the call and
 * the disgusting return handling), but that would require some help
 * from the compiler for better calling conventions.
 *
 * The 'rep stosb' itself is small enough to replace the call, but all
 * the register moves blow up the code. And two of them are "needed"
 * only for the return value that is the same as the source input,
 * which the compiler could/should do much better anyway.
 */
SYM_FUNC_START(__memset)
	ALTERNATIVE "jmp memset_orig", "", X86_FEATURE_FSRS
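	/* CPUs with FSRS ("Fast Short REP STOSB") handle even tiny counts well,
	   so fall through to the plain 'rep stosb' below; without FSRS the
	   alternative leaves the jump to memset_orig in place */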

	movq %rdi,%r9
	movb %sil,%al
	movq %rdx,%rcx
	rep stosb
	movq %r9,%rax
	RET
SYM_FUNC_END(__memset)
EXPORT_SYMBOL(__memset)

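/* 'memset' is an alias of '__memset'; both exported symbols resolve to the
   same code */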
SYM_FUNC_ALIAS_MEMFUNC(memset, __memset)
EXPORT_SYMBOL(memset)

SYM_FUNC_START_LOCAL(memset_orig)
	movq %rdi,%r10

	/* expand byte value */
	movzbl %sil,%ecx
	movabs $0x0101010101010101,%rax
	imulq %rcx,%rax

	/* align dst */
	movl %edi,%r9d
	andl $7,%r9d
	jnz .Lbad_alignment
.Lafter_bad_alignment:

	movq %rdx,%rcx
	shrq $6,%rcx
	jz .Lhandle_tail

	.p2align 4
.Lloop_64:
	decq %rcx
	movq %rax,(%rdi)
	movq %rax,8(%rdi)
	movq %rax,16(%rdi)
	movq %rax,24(%rdi)
	movq %rax,32(%rdi)
	movq %rax,40(%rdi)
	movq %rax,48(%rdi)
	movq %rax,56(%rdi)
	leaq 64(%rdi),%rdi
	jnz .Lloop_64

	/* Handle the tail in loops. The loops should be faster than
	   hard-to-predict jump tables. */
	.p2align 4
.Lhandle_tail:
	movl %edx,%ecx
	andl $63&(~7),%ecx
	jz .Lhandle_7
	shrl $3,%ecx
	.p2align 4
.Lloop_8:
	decl %ecx
	movq %rax,(%rdi)
	leaq 8(%rdi),%rdi
	jnz .Lloop_8

.Lhandle_7:
	andl $7,%edx
	jz .Lende
	.p2align 4
.Lloop_1:
	decl %edx
	movb %al,(%rdi)
	leaq 1(%rdi),%rdi
	jnz .Lloop_1

.Lende:
	movq %r10,%rax
	RET

.Lbad_alignment:
	cmpq $7,%rdx
	jbe .Lhandle_7
	movq %rax,(%rdi)	/* unaligned store */
	movq $8,%r8
	subq %r9,%r8
	addq %r8,%rdi
	subq %r8,%rdx
	jmp .Lafter_bad_alignment
.Lfinal:
SYM_FUNC_END(memset_orig)