v4.10.11
 
/* Copyright 2002 Andi Kleen, SuSE Labs */

#include <linux/linkage.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm/export.h>

/* weak so instrumented variants (such as KASAN's) can take over the symbol */
.weak memset

/*
 * ISO C memset - set a memory block to a byte value. This function uses fast
 * string operations to get better performance than the original function.
 * The code is simpler and shorter than the original function as well.
 *
 * rdi   destination
 * rsi   value (char)
 * rdx   count (bytes)
 *
 * rax   original destination
 */
ENTRY(memset)
ENTRY(__memset)
	/*
	 * Some CPUs support the enhanced REP MOVSB/STOSB (ERMS) feature and
	 * it is preferred where available. Failing that, CPUs with fast
	 * string operations (REP_GOOD) fall through to the 'rep stosq'
	 * path below. Otherwise, use the original unrolled memset function.
	 */
	ALTERNATIVE_2 "jmp memset_orig", "", X86_FEATURE_REP_GOOD, \
		      "jmp memset_erms", X86_FEATURE_ERMS

	movq %rdi,%r9
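	/*
	 * Split the count: %rcx gets the number of 8-byte words for
	 * 'rep stosq', while the low three bits kept in %edx become the
	 * 0..7 trailing bytes stored by the final 'rep stosb'.
	 */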
	movq %rdx,%rcx
	andl $7,%edx
	shrq $3,%rcx
	/* expand byte value  */
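	/*
	 * Multiplying the zero-extended byte by 0x0101010101010101
	 * replicates it into all eight byte lanes of %rax.
	 */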
	movzbl %sil,%esi
	movabs $0x0101010101010101,%rax
	imulq %rsi,%rax
	rep stosq
	movl %edx,%ecx
	rep stosb
	movq %r9,%rax
	ret
ENDPROC(memset)
ENDPROC(__memset)
EXPORT_SYMBOL(memset)
EXPORT_SYMBOL(__memset)

/*
 * ISO C memset - set a memory block to a byte value. This function uses
 * enhanced rep stosb to override the fast string function.
 * The code is simpler and shorter than the fast string function as well.
 *
 * rdi   destination
 * rsi   value (char)
 * rdx   count (bytes)
 *
 * rax   original destination
 */
ENTRY(memset_erms)
	/*
	 * With ERMS, a single 'rep stosb' covers the whole range
	 * efficiently: %al holds the fill byte, %rcx the byte count.
	 */
	movq %rdi,%r9
	movb %sil,%al
	movq %rdx,%rcx
	rep stosb
	movq %r9,%rax
	ret
ENDPROC(memset_erms)

ENTRY(memset_orig)
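	/*
	 * Fallback for CPUs without fast string operations: align the
	 * destination to 8 bytes, store 64 bytes per iteration in the
	 * main loop, then finish with 8-byte and 1-byte tail loops.
	 */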
	movq %rdi,%r10

	/* expand byte value  */
	movzbl %sil,%ecx
	movabs $0x0101010101010101,%rax
	imulq  %rcx,%rax

	/* align dst */
	movl  %edi,%r9d
	andl  $7,%r9d
	jnz  .Lbad_alignment
.Lafter_bad_alignment:

	movq  %rdx,%rcx
	shrq  $6,%rcx
	jz	 .Lhandle_tail

	.p2align 4
.Lloop_64:
	decq  %rcx
	movq  %rax,(%rdi)
	movq  %rax,8(%rdi)
	movq  %rax,16(%rdi)
	movq  %rax,24(%rdi)
	movq  %rax,32(%rdi)
	movq  %rax,40(%rdi)
	movq  %rax,48(%rdi)
	movq  %rax,56(%rdi)
	leaq  64(%rdi),%rdi
	jnz    .Lloop_64

	/*
	 * Handle the tail in loops. The loops should be faster than
	 * hard-to-predict jump tables.
	 */
	.p2align 4
.Lhandle_tail:
	movl	%edx,%ecx
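	/*
	 * Mask the remaining count (mod 64) down to whole qwords for the
	 * 8-byte loop; the final 0..7 bytes are handled in .Lhandle_7.
	 */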
	andl    $63&(~7),%ecx
	jz 		.Lhandle_7
	shrl	$3,%ecx
	.p2align 4
.Lloop_8:
	decl   %ecx
	movq  %rax,(%rdi)
	leaq  8(%rdi),%rdi
	jnz    .Lloop_8

.Lhandle_7:
	andl	$7,%edx
	jz      .Lende
	.p2align 4
.Lloop_1:
	decl    %edx
	movb 	%al,(%rdi)
	leaq	1(%rdi),%rdi
	jnz     .Lloop_1

.Lende:
	movq	%r10,%rax
	ret

.Lbad_alignment:
	cmpq $7,%rdx
	jbe	.Lhandle_7
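	/*
	 * Store one unaligned qword at the start, then advance dst to
	 * the next 8-byte boundary and drop the bytes already covered
	 * from the count.
	 */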
	movq %rax,(%rdi)	/* unaligned store */
	movq $8,%r8
	subq %r9,%r8
	addq %r8,%rdi
	subq %r8,%rdx
	jmp .Lafter_bad_alignment
.Lfinal:
ENDPROC(memset_orig)
v6.9.4
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright 2002 Andi Kleen, SuSE Labs */

#include <linux/export.h>
#include <linux/linkage.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h>

/* kept in .noinstr.text so it can be called from code that must not be instrumented */
.section .noinstr.text, "ax"

/*
 * ISO C memset - set a memory block to a byte value. This function uses fast
 * string operations to get better performance than the original function.
 * The code is simpler and shorter than the original function as well.
 *
 * rdi   destination
 * rsi   value (char)
 * rdx   count (bytes)
 *
 * rax   original destination
 *
 * The FSRS alternative should be done inline (avoiding the call and
 * the disgusting return handling), but that would require some help
 * from the compiler for better calling conventions.
 *
 * The 'rep stosb' itself is small enough to replace the call, but all
 * the register moves blow up the code. And two of them are "needed"
 * only for the return value that is the same as the source input,
 * which the compiler could/should do much better anyway.
 */
SYM_FUNC_START(__memset)
	ALTERNATIVE "jmp memset_orig", "", X86_FEATURE_FSRS
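	/*
	 * With Fast Short REP STOSB (FSRS), 'rep stosb' is fast even for
	 * small counts, so the jump above is patched out and the code
	 * falls through to it; CPUs without FSRS take memset_orig.
	 */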

	movq %rdi,%r9
	movb %sil,%al
	movq %rdx,%rcx
	rep stosb
	movq %r9,%rax
	RET
SYM_FUNC_END(__memset)
EXPORT_SYMBOL(__memset)
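/*
 * Illustrative sketch only (not part of the kernel source): inlining the
 * FSRS path at a call site, as the comment above suggests, could look
 * roughly like the following C with GCC inline asm. The helper name
 * 'memset_fsrs' is hypothetical; the "+D"/"+c"/"a" constraints mirror the
 * register usage of the assembly above.
 *
 *	static inline void *memset_fsrs(void *dest, int c, size_t n)
 *	{
 *		void *d = dest;
 *
 *		// 'rep stosb' stores %al to (%rdi), %rcx times
 *		asm volatile("rep stosb"
 *			     : "+D" (d), "+c" (n)
 *			     : "a" (c)
 *			     : "memory");
 *		return dest;
 *	}
 */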

/* 'memset' is exported as a plain alias of '__memset' */
SYM_FUNC_ALIAS_MEMFUNC(memset, __memset)
EXPORT_SYMBOL(memset)

SYM_FUNC_START_LOCAL(memset_orig)
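	/*
	 * Unrolled fallback for CPUs without FSRS: align the destination
	 * to 8 bytes, store 64 bytes per iteration in the main loop, then
	 * finish with 8-byte and 1-byte tail loops.
	 */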
	movq %rdi,%r10

	/* expand byte value  */
	movzbl %sil,%ecx
	movabs $0x0101010101010101,%rax
	imulq  %rcx,%rax

	/* align dst */
	movl  %edi,%r9d
	andl  $7,%r9d
	jnz  .Lbad_alignment
.Lafter_bad_alignment:

	movq  %rdx,%rcx
	shrq  $6,%rcx
	jz	 .Lhandle_tail

	.p2align 4
.Lloop_64:
	decq  %rcx
	movq  %rax,(%rdi)
	movq  %rax,8(%rdi)
	movq  %rax,16(%rdi)
	movq  %rax,24(%rdi)
	movq  %rax,32(%rdi)
	movq  %rax,40(%rdi)
	movq  %rax,48(%rdi)
	movq  %rax,56(%rdi)
	leaq  64(%rdi),%rdi
	jnz    .Lloop_64

	/*
	 * Handle the tail in loops. The loops should be faster than
	 * hard-to-predict jump tables.
	 */
	.p2align 4
.Lhandle_tail:
	movl	%edx,%ecx
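	/*
	 * Mask the remaining count (mod 64) down to whole qwords for the
	 * 8-byte loop; the final 0..7 bytes are handled in .Lhandle_7.
	 */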
	andl    $63&(~7),%ecx
	jz 		.Lhandle_7
	shrl	$3,%ecx
	.p2align 4
.Lloop_8:
	decl   %ecx
	movq  %rax,(%rdi)
	leaq  8(%rdi),%rdi
	jnz    .Lloop_8

.Lhandle_7:
	andl	$7,%edx
	jz      .Lende
	.p2align 4
.Lloop_1:
	decl    %edx
	movb 	%al,(%rdi)
	leaq	1(%rdi),%rdi
	jnz     .Lloop_1

.Lende:
	movq	%r10,%rax
	RET

.Lbad_alignment:
	cmpq $7,%rdx
	jbe	.Lhandle_7
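	/*
	 * Store one unaligned qword at the start, then advance dst to
	 * the next 8-byte boundary and drop the bytes already covered
	 * from the count.
	 */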
	movq %rax,(%rdi)	/* unaligned store */
	movq $8,%r8
	subq %r9,%r8
	addq %r8,%rdi
	subq %r8,%rdx
	jmp .Lafter_bad_alignment
.Lfinal:
SYM_FUNC_END(memset_orig)