Linux Audio

Check our new training course

Loading...
v3.1
 
  1/* Copyright 2002 Andi Kleen, SuSE Labs */
  2
 
  3#include <linux/linkage.h>
  4#include <asm/dwarf2.h>
  5#include <asm/cpufeature.h>
  6#include <asm/alternative-asm.h>
 
  7
  8/*
  9 * ISO C memset - set a memory block to a byte value. This function uses fast
 10 * string to get better performance than the original function. The code is
  11 * simpler and shorter than the original function as well.
 12 *	
 13 * rdi   destination
 14 * rsi   value (char) 
 15 * rdx   count (bytes) 
 16 * 
 17 * rax   original destination
 18 */	
  19	.section .altinstr_replacement, "ax", @progbits
  20.Lmemset_c:
	/*
	 * REP_GOOD variant: patched in over memset by the
	 * .altinstructions entry at the bottom of this file.
	 * NOTE(review): only the low 32 bits of the count in %rdx are
	 * used here -- assumes len < 4 GiB; confirm against callers.
	 */
  21	movq %rdi,%r9		/* save dest; it is the return value */
  22	movl %edx,%r8d
  23	andl $7,%r8d		/* r8d = trailing bytes (count % 8) */
  24	movl %edx,%ecx
  25	shrl $3,%ecx		/* ecx = whole qwords (count / 8) */
  26	/* expand byte value  */
  27	movzbl %sil,%esi	/* zero-extend the fill byte */
  28	movabs $0x0101010101010101,%rax
  29	mulq %rsi		/* with rax, clobbers rdx */
  30	rep stosq		/* store ecx qwords of the pattern */
  31	movl %r8d,%ecx
  32	rep stosb		/* store the remaining tail bytes */
  33	movq %r9,%rax		/* return the original destination */
  34	ret
  35.Lmemset_e:
  36	.previous
 37
 38/*
 39 * ISO C memset - set a memory block to a byte value. This function uses
 40 * enhanced rep stosb to override the fast string function.
 41 * The code is simpler and shorter than the fast string function as well.
 42 *
 43 * rdi   destination
 44 * rsi   value (char)
 45 * rdx   count (bytes)
 46 *
 47 * rax   original destination
 
 
 
 
 
 
 
 
 
 48 */
  49	.section .altinstr_replacement, "ax", @progbits
  50.Lmemset_c_e:
	/*
	 * ERMS variant: a bare 'rep stosb' handles the whole range.
	 * NOTE(review): only %edx (low 32 bits) of the count is used
	 * -- assumes len < 4 GiB; confirm against callers.
	 */
  51	movq %rdi,%r9		/* save dest; it is the return value */
  52	movb %sil,%al		/* al = fill byte for stosb */
  53	movl %edx,%ecx		/* ecx = byte count */
  54	rep stosb
  55	movq %r9,%rax		/* return the original destination */
  56	ret
  57.Lmemset_e_e:
  58	.previous
 59
  60ENTRY(memset)
  61ENTRY(__memset)
	/*
	 * Generic memset: expand the fill byte to a qword, then store
	 * 64 bytes per loop iteration, finishing with qword and byte
	 * tail loops.  r10 = original dest (returned), r11 = count.
	 */
  62	CFI_STARTPROC
  63	movq %rdi,%r10		/* r10 = dest, restored into rax at .Lende */
  64	movq %rdx,%r11		/* r11 = byte count */
  65
  66	/* expand byte value  */
  67	movzbl %sil,%ecx
  68	movabs $0x0101010101010101,%rax
  69	mul    %rcx		/* with rax, clobbers rdx */
  70
  71	/* align dst */
  72	movl  %edi,%r9d
  73	andl  $7,%r9d		/* r9d = dest misalignment (dest % 8) */
  74	jnz  .Lbad_alignment
  75	CFI_REMEMBER_STATE
  76.Lafter_bad_alignment:
  77
  78	movl %r11d,%ecx		/* NOTE(review): low 32 bits of count only -- confirm */
  79	shrl $6,%ecx		/* ecx = number of 64-byte blocks */
  80	jz	 .Lhandle_tail
  81
  82	.p2align 4
  83.Lloop_64:			/* 8 qword stores = 64 bytes per iteration */
  84	decl   %ecx
  85	movq  %rax,(%rdi)
  86	movq  %rax,8(%rdi)
  87	movq  %rax,16(%rdi)
  88	movq  %rax,24(%rdi)
  89	movq  %rax,32(%rdi)
  90	movq  %rax,40(%rdi)
  91	movq  %rax,48(%rdi)
  92	movq  %rax,56(%rdi)
  93	leaq  64(%rdi),%rdi
  94	jnz    .Lloop_64
  95
  96	/* Handle tail in loops. The loops should be faster than hard
  97	   to predict jump tables. */
  98	.p2align 4
  99.Lhandle_tail:
 100	movl	%r11d,%ecx
 101	andl    $63&(~7),%ecx	/* ecx = leftover whole qwords, in bytes */
 102	jz 		.Lhandle_7
 103	shrl	$3,%ecx
 104	.p2align 4
 105.Lloop_8:			/* one qword per iteration */
 106	decl   %ecx
 107	movq  %rax,(%rdi)
 108	leaq  8(%rdi),%rdi
 109	jnz    .Lloop_8
 110
 111.Lhandle_7:			/* final count % 8 bytes, one at a time */
 112	movl	%r11d,%ecx
 113	andl	$7,%ecx
 114	jz      .Lende
 115	.p2align 4
 116.Lloop_1:
 117	decl    %ecx
 118	movb 	%al,(%rdi)
 119	leaq	1(%rdi),%rdi
 120	jnz     .Lloop_1
 121
 122.Lende:
 123	movq	%r10,%rax	/* return the original destination */
 124	ret
 125
 126	CFI_RESTORE_STATE
 127.Lbad_alignment:
 128	cmpq $7,%r11
 129	jbe	.Lhandle_7	/* tiny memset: the byte loop covers it all */
 130	movq %rax,(%rdi)	/* unaligned store */
 131	movq $8,%r8
 132	subq %r9,%r8		/* r8 = bytes needed to reach 8-byte alignment */
 133	addq %r8,%rdi
 134	subq %r8,%r11
 135	jmp .Lafter_bad_alignment
 136.Lfinal:
 137	CFI_ENDPROC
 138ENDPROC(memset)
 139ENDPROC(__memset)
140
141	/* Some CPUs support enhanced REP MOVSB/STOSB feature.
142	 * It is recommended to use this when possible.
143	 *
144	 * If enhanced REP MOVSB/STOSB feature is not available, use fast string
145	 * instructions.
146	 *
147	 * Otherwise, use original memset function.
148	 *
149	 * In .altinstructions section, ERMS feature is placed after REG_GOOD
150         * feature to implement the right patch order.
151	 */
 152	.section .altinstructions,"a"
	/*
	 * Patch memset with the fast-string (.Lmemset_c) version on
	 * REP_GOOD CPUs; the ERMS (.Lmemset_c_e) entry comes second so
	 * it wins when ERMS is also present (see comment above).
	 * Arguments: original, replacement, feature, orig len, repl len.
	 */
 153	altinstruction_entry memset,.Lmemset_c,X86_FEATURE_REP_GOOD,\
 154			     .Lfinal-memset,.Lmemset_e-.Lmemset_c
 155	altinstruction_entry memset,.Lmemset_c_e,X86_FEATURE_ERMS, \
 156			     .Lfinal-memset,.Lmemset_e_e-.Lmemset_c_e
 157	.previous
v6.8
  1/* SPDX-License-Identifier: GPL-2.0 */
  2/* Copyright 2002 Andi Kleen, SuSE Labs */
  3
  4#include <linux/export.h>
  5#include <linux/linkage.h>
  6#include <asm/cpufeatures.h>
  7#include <asm/alternative.h>
  8
  9.section .noinstr.text, "ax"
 10
 11/*
 12 * ISO C memset - set a memory block to a byte value. This function uses fast
 13 * string to get better performance than the original function. The code is
 14 * simpler and shorter than the original function as well.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 15 *
 16 * rdi   destination
 17 * rsi   value (char)
 18 * rdx   count (bytes)
 19 *
 20 * rax   original destination
 21 *
 22 * The FSRS alternative should be done inline (avoiding the call and
 23 * the disgusting return handling), but that would require some help
 24 * from the compiler for better calling conventions.
 25 *
 26 * The 'rep stosb' itself is small enough to replace the call, but all
 27 * the register moves blow up the code. And two of them are "needed"
 28 * only for the return value that is the same as the source input,
 29 * which the compiler could/should do much better anyway.
 30 */
  31SYM_FUNC_START(__memset)
	/*
	 * On FSRS CPUs the alternative is patched to nothing and we
	 * fall through to the plain 'rep stosb'; otherwise we jump to
	 * the open-coded memset_orig below.
	 */
  32	ALTERNATIVE "jmp memset_orig", "", X86_FEATURE_FSRS
  33
  34	movq %rdi,%r9		/* save dest; it is the return value */
  35	movb %sil,%al		/* al = fill byte for stosb */
  36	movq %rdx,%rcx		/* rcx = full 64-bit byte count */
  37	rep stosb
  38	movq %r9,%rax		/* return the original destination */
  39	RET
  40SYM_FUNC_END(__memset)
  41EXPORT_SYMBOL(__memset)
  42
  43SYM_FUNC_ALIAS_MEMFUNC(memset, __memset)
  44EXPORT_SYMBOL(memset)
 45
  46SYM_FUNC_START_LOCAL(memset_orig)
	/*
	 * Fallback for CPUs without FSRS: expand the fill byte to a
	 * qword, store 64 bytes per loop iteration, then finish with
	 * qword and byte tail loops.
	 * r10 = original dest (returned), rdx = remaining count.
	 */
  47	movq %rdi,%r10		/* r10 = dest, restored into rax at .Lende */

  48
  49	/* expand byte value  */
  50	movzbl %sil,%ecx
  51	movabs $0x0101010101010101,%rax
  52	imulq  %rcx,%rax	/* rax = fill byte replicated into all 8 lanes */
  53
  54	/* align dst */
  55	movl  %edi,%r9d
  56	andl  $7,%r9d		/* r9d = dest misalignment (dest % 8) */
  57	jnz  .Lbad_alignment

  58.Lafter_bad_alignment:
  59
  60	movq  %rdx,%rcx
  61	shrq  $6,%rcx		/* rcx = number of 64-byte blocks */
  62	jz	 .Lhandle_tail
  63
  64	.p2align 4
  65.Lloop_64:			/* 8 qword stores = 64 bytes per iteration */
  66	decq  %rcx
  67	movq  %rax,(%rdi)
  68	movq  %rax,8(%rdi)
  69	movq  %rax,16(%rdi)
  70	movq  %rax,24(%rdi)
  71	movq  %rax,32(%rdi)
  72	movq  %rax,40(%rdi)
  73	movq  %rax,48(%rdi)
  74	movq  %rax,56(%rdi)
  75	leaq  64(%rdi),%rdi
  76	jnz    .Lloop_64
  77
  78	/* Handle tail in loops. The loops should be faster than hard
  79	   to predict jump tables. */
  80	.p2align 4
  81.Lhandle_tail:
  82	movl	%edx,%ecx
  83	andl    $63&(~7),%ecx	/* ecx = leftover whole qwords, in bytes */
  84	jz 		.Lhandle_7
  85	shrl	$3,%ecx
  86	.p2align 4
  87.Lloop_8:			/* one qword per iteration */
  88	decl   %ecx
  89	movq  %rax,(%rdi)
  90	leaq  8(%rdi),%rdi
  91	jnz    .Lloop_8
  92
  93.Lhandle_7:			/* final count % 8 bytes, one at a time */
  94	andl	$7,%edx

  95	jz      .Lende
  96	.p2align 4
  97.Lloop_1:
  98	decl    %edx
  99	movb 	%al,(%rdi)
 100	leaq	1(%rdi),%rdi
 101	jnz     .Lloop_1
 102
 103.Lende:
 104	movq	%r10,%rax	/* return the original destination */
 105	RET
 106

 107.Lbad_alignment:
 108	cmpq $7,%rdx
 109	jbe	.Lhandle_7	/* tiny memset: the byte loop covers it all */
 110	movq %rax,(%rdi)	/* unaligned store */
 111	movq $8,%r8
 112	subq %r9,%r8		/* r8 = bytes needed to reach 8-byte alignment */
 113	addq %r8,%rdi
 114	subq %r8,%rdx
 115	jmp .Lafter_bad_alignment
 116.Lfinal:
 117SYM_FUNC_END(memset_orig)