v6.8 (arch/x86/lib/memcpy_64.S)

/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright 2002 Andi Kleen */

#include <linux/export.h>
#include <linux/linkage.h>
#include <linux/cfi_types.h>
#include <asm/errno.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h>

.section .noinstr.text, "ax"

/*
 * memcpy - Copy a memory block.
 *
 * Input:
 *  rdi destination
 *  rsi source
 *  rdx count
 *
 * Output:
 * rax original destination
 *
 * The FSRM alternative should be done inline (avoiding the call and
 * the disgusting return handling), but that would require some help
 * from the compiler for better calling conventions.
 *
 * The 'rep movsb' itself is small enough to replace the call, but the
 * two register moves blow up the code. And one of them is "needed"
 * only for the return value that is the same as the source input,
 * which the compiler could/should do much better anyway.
 */
SYM_TYPED_FUNC_START(__memcpy)
	ALTERNATIVE "jmp memcpy_orig", "", X86_FEATURE_FSRM

	movq %rdi, %rax
	movq %rdx, %rcx
	rep movsb
	RET
SYM_FUNC_END(__memcpy)
EXPORT_SYMBOL(__memcpy)

SYM_FUNC_ALIAS_MEMFUNC(memcpy, __memcpy)
EXPORT_SYMBOL(memcpy)

SYM_FUNC_START_LOCAL(memcpy_orig)
	movq %rdi, %rax

	cmpq $0x20, %rdx
	jb .Lhandle_tail

	/*
	 * We check whether memory false dependence could occur,
	 * then jump to corresponding copy mode.
	 */
	cmp  %dil, %sil
	jl .Lcopy_backward
	subq $0x20, %rdx
.Lcopy_forward_loop:
	subq $0x20,	%rdx

	/*
	 * Move in blocks of 4x8 bytes:
	 */
	movq 0*8(%rsi),	%r8
	movq 1*8(%rsi),	%r9
	movq 2*8(%rsi),	%r10
	movq 3*8(%rsi),	%r11
	leaq 4*8(%rsi),	%rsi

	movq %r8,	0*8(%rdi)
	movq %r9,	1*8(%rdi)
	movq %r10,	2*8(%rdi)
	movq %r11,	3*8(%rdi)
	leaq 4*8(%rdi),	%rdi
	jae  .Lcopy_forward_loop
	addl $0x20,	%edx
	jmp  .Lhandle_tail

.Lcopy_backward:
	/*
	 * Calculate copy position to tail.
	 */
	addq %rdx,	%rsi
	addq %rdx,	%rdi
	subq $0x20,	%rdx
	/*
	 * At most 3 ALU operations in one cycle,
	 * so append NOPS in the same 16 bytes trunk.
	 */
	.p2align 4
.Lcopy_backward_loop:
	subq $0x20,	%rdx
	movq -1*8(%rsi),	%r8
	movq -2*8(%rsi),	%r9
	movq -3*8(%rsi),	%r10
	movq -4*8(%rsi),	%r11
	leaq -4*8(%rsi),	%rsi
	movq %r8,		-1*8(%rdi)
	movq %r9,		-2*8(%rdi)
	movq %r10,		-3*8(%rdi)
	movq %r11,		-4*8(%rdi)
	leaq -4*8(%rdi),	%rdi
	jae  .Lcopy_backward_loop

	/*
	 * Calculate copy position to head.
	 */
	addl $0x20,	%edx
	subq %rdx,	%rsi
	subq %rdx,	%rdi
.Lhandle_tail:
	cmpl $16,	%edx
	jb   .Lless_16bytes

	/*
	 * Move data from 16 bytes to 31 bytes.
	 */
	movq 0*8(%rsi), %r8
	movq 1*8(%rsi),	%r9
	movq -2*8(%rsi, %rdx),	%r10
	movq -1*8(%rsi, %rdx),	%r11
	movq %r8,	0*8(%rdi)
	movq %r9,	1*8(%rdi)
	movq %r10,	-2*8(%rdi, %rdx)
	movq %r11,	-1*8(%rdi, %rdx)
	RET
	.p2align 4
.Lless_16bytes:
	cmpl $8,	%edx
	jb   .Lless_8bytes
	/*
	 * Move data from 8 bytes to 15 bytes.
	 */
	movq 0*8(%rsi),	%r8
	movq -1*8(%rsi, %rdx),	%r9
	movq %r8,	0*8(%rdi)
	movq %r9,	-1*8(%rdi, %rdx)
	RET
	.p2align 4
.Lless_8bytes:
	cmpl $4,	%edx
	jb   .Lless_3bytes

	/*
	 * Move data from 4 bytes to 7 bytes.
	 */
	movl (%rsi), %ecx
	movl -4(%rsi, %rdx), %r8d
	movl %ecx, (%rdi)
	movl %r8d, -4(%rdi, %rdx)
	RET
	.p2align 4
.Lless_3bytes:
	subl $1, %edx
	jb .Lend
	/*
	 * Move data from 1 bytes to 3 bytes.
	 */
	movzbl (%rsi), %ecx
	jz .Lstore_1byte
	movzbq 1(%rsi), %r8
	movzbq (%rsi, %rdx), %r9
	movb %r8b, 1(%rdi)
	movb %r9b, (%rdi, %rdx)
.Lstore_1byte:
	movb %cl, (%rdi)

.Lend:
	RET
SYM_FUNC_END(memcpy_orig)
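
The v6.8 fast path above leans entirely on FSRM (Fast Short REP MOVSB): the ALTERNATIVE patches the "jmp memcpy_orig" into NOPs on FSRM CPUs, so __memcpy runs straight into the two register moves and a single 'rep movsb'. The following user-space C sketch is a hypothetical illustration of that sequence, not kernel code (the name fsrm_memcpy and the GNU inline-asm form are assumptions, and it only builds on x86-64 with GCC or Clang): the byte count lands in RCX, 'rep movsb' copies RCX bytes from [RSI] to [RDI], and the original destination pointer is returned, which is the role RAX plays in the assembly.

#include <stddef.h>
#include <stdio.h>

/* Hypothetical user-space illustration of the FSRM path, not kernel code. */
static void *fsrm_memcpy(void *dst, const void *src, size_t len)
{
	void *ret = dst;			/* plays the role of rax */

	/* destination in RDI, source in RSI, count in RCX */
	asm volatile("rep movsb"
		     : "+D" (dst), "+S" (src), "+c" (len)
		     :
		     : "memory");
	return ret;
}

int main(void)
{
	char src[] = "fast short rep movsb";
	char dst[sizeof(src)];

	fsrm_memcpy(dst, src, sizeof(src));
	puts(dst);
	return 0;
}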
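
memcpy_orig, the non-FSRM fallback, moves 32-byte blocks four quadwords at a time, picking a forward or backward direction based on the 'cmp %dil, %sil' heuristic that guards against memory false dependences, and then finishes any 1..31 byte tail with overlapping loads and stores taken from both ends of the remainder instead of a byte loop. Below is a rough C rendering of that strategy; it is a hypothetical simplification (forward direction only; the names orig_memcpy, load64 and store64 are invented here), not the kernel implementation.

#include <stdint.h>
#include <string.h>
#include <stddef.h>

/* A fixed-size memcpy() of 8 bytes compiles down to a single load or store. */
static inline uint64_t load64(const unsigned char *p)
{
	uint64_t v;
	memcpy(&v, p, sizeof(v));
	return v;
}

static inline void store64(unsigned char *p, uint64_t v)
{
	memcpy(p, &v, sizeof(v));
}

/* Hypothetical sketch of the memcpy_orig strategy (forward case only). */
void *orig_memcpy(void *dst, const void *src, size_t len)
{
	unsigned char *d = dst;
	const unsigned char *s = src;

	/* Bulk: 32-byte blocks, four quadwords per iteration. */
	while (len >= 32) {
		store64(d +  0, load64(s +  0));
		store64(d +  8, load64(s +  8));
		store64(d + 16, load64(s + 16));
		store64(d + 24, load64(s + 24));
		d += 32;
		s += 32;
		len -= 32;
	}

	/* Tail: cover the remaining bytes with overlapping copies from the
	 * head and from the end of the remainder, never a byte loop. */
	if (len >= 16) {				/* 16..31 bytes */
		uint64_t q0 = load64(s), q1 = load64(s + 8);
		uint64_t q2 = load64(s + len - 16), q3 = load64(s + len - 8);
		store64(d, q0);
		store64(d + 8, q1);
		store64(d + len - 16, q2);
		store64(d + len - 8, q3);
	} else if (len >= 8) {				/* 8..15 bytes */
		uint64_t q0 = load64(s), q1 = load64(s + len - 8);
		store64(d, q0);
		store64(d + len - 8, q1);
	} else if (len >= 4) {				/* 4..7 bytes */
		uint32_t w0, w1;
		memcpy(&w0, s, 4);
		memcpy(&w1, s + len - 4, 4);
		memcpy(d, &w0, 4);
		memcpy(d + len - 4, &w1, 4);
	} else if (len) {				/* 1..3 bytes, as in .Lless_3bytes */
		if (len > 1) {
			d[1] = s[1];
			d[len - 1] = s[len - 1];
		}
		d[0] = s[0];
	}
	return dst;
}

One detail worth noting in the assembly's 1..3 byte path: the 'jz .Lstore_1byte' after 'movzbl (%rsi), %ecx' still tests the flags set by the earlier 'subl $1, %edx', since movzbl does not modify flags.
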
v6.2 (arch/x86/lib/memcpy_64.S)

/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright 2002 Andi Kleen */

#include <linux/linkage.h>
#include <linux/cfi_types.h>
#include <asm/errno.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h>
#include <asm/export.h>

.pushsection .noinstr.text, "ax"

/*
 * We build a jump to memcpy_orig by default which gets NOPped out on
 * the majority of x86 CPUs which set REP_GOOD. In addition, CPUs which
 * have the enhanced REP MOVSB/STOSB feature (ERMS), change those NOPs
 * to a jmp to memcpy_erms which does the REP; MOVSB mem copy.
 */

/*
 * memcpy - Copy a memory block.
 *
 * Input:
 *  rdi destination
 *  rsi source
 *  rdx count
 *
 * Output:
 * rax original destination
 */
SYM_TYPED_FUNC_START(__memcpy)
	ALTERNATIVE_2 "jmp memcpy_orig", "", X86_FEATURE_REP_GOOD, \
		      "jmp memcpy_erms", X86_FEATURE_ERMS

	movq %rdi, %rax
	movq %rdx, %rcx
	shrq $3, %rcx
	andl $7, %edx
	rep movsq
	movl %edx, %ecx
	rep movsb
	RET
SYM_FUNC_END(__memcpy)
EXPORT_SYMBOL(__memcpy)

SYM_FUNC_ALIAS_WEAK(memcpy, __memcpy)
EXPORT_SYMBOL(memcpy)

/*
 * memcpy_erms() - enhanced fast string memcpy. This is faster and
 * simpler than memcpy. Use memcpy_erms when possible.
 */
SYM_FUNC_START_LOCAL(memcpy_erms)
	movq %rdi, %rax
	movq %rdx, %rcx
	rep movsb
	RET
SYM_FUNC_END(memcpy_erms)

[memcpy_orig is unchanged between v6.2 and v6.8; see the v6.8 listing above.]

.popsection
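
In the v6.2 version, the default path (the one left in place when the CPU sets REP_GOOD) splits the count into quadwords and a 0..7 byte remainder: 'shrq $3, %rcx' yields the number of 8-byte moves for 'rep movsq', and 'andl $7, %edx' keeps the leftover bytes for the trailing 'rep movsb'. A hypothetical user-space sketch of that split, not kernel code (the name rep_good_memcpy and the inline-asm form are assumptions, x86-64 with GCC or Clang assumed), is:

#include <stddef.h>

/* Hypothetical user-space illustration of the v6.2 REP_GOOD path. */
void *rep_good_memcpy(void *dst, const void *src, size_t len)
{
	void *ret = dst;			/* plays the role of rax */
	size_t qwords = len >> 3;		/* shrq $3, %rcx */
	size_t tail = len & 7;			/* andl $7, %edx */

	/* Copy len/8 quadwords; RDI and RSI advance as a side effect. */
	asm volatile("rep movsq"
		     : "+D" (dst), "+S" (src), "+c" (qwords)
		     :
		     : "memory");
	/* Copy the remaining 0..7 bytes. */
	asm volatile("rep movsb"
		     : "+D" (dst), "+S" (src), "+c" (tail)
		     :
		     : "memory");
	return ret;
}

The ERMS variant, memcpy_erms, drops the split and issues a single 'rep movsb' for the whole count; that is the form v6.8 keeps as its only fast path, gated on FSRM, in the first listing.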