/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2013 ARM Ltd.
 * Copyright (C) 2013 Linaro.
 *
 * This code is based on glibc cortex strings work originally authored by Linaro
 * and re-licensed under GPLv2 for the Linux kernel. The original code can
 * be found @
 *
 * http://bazaar.launchpad.net/~linaro-toolchain-dev/cortex-strings/trunk/
 * files/head:/src/aarch64/
 */

#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/cache.h>

/*
 * Fill in the buffer with character c (alignment handled by the hardware)
 *
 * Parameters:
 *	x0 - buf
 *	x1 - c
 *	x2 - n
 * Returns:
 *	x0 - buf
 */
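/*
 * For reference, the C prototype is void *memset(void *buf, int c, size_t n);
 * only the low byte of c is used as the fill value.
 */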

dstin		.req	x0
val		.req	w1
count		.req	x2
tmp1		.req	x3
tmp1w		.req	w3
tmp2		.req	x4
tmp2w		.req	w4
zva_len_x	.req	x5
zva_len		.req	w5
zva_bits_x	.req	x6

A_l		.req	x7
A_lw		.req	w7
dst		.req	x8
tmp3w		.req	w9
tmp3		.req	x9
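
/*
 * A_l/A_lw are the 64-/32-bit views of the replicated fill value; dst is the
 * working pointer, while dstin is left untouched so it can be returned.
 */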

SYM_FUNC_START(__pi_memset)
	mov	dst, dstin	/* Preserve return value.  */
	and	A_lw, val, #255
	orr	A_lw, A_lw, A_lw, lsl #8
	orr	A_lw, A_lw, A_lw, lsl #16
	orr	A_l, A_l, A_l, lsl #32
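	/*
	 * Writes to A_lw (w7) zero-extend into A_l (x7), so after these shifts
	 * and ORs the fill byte is replicated into all eight bytes of A_l.
	 */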

	cmp	count, #15
	b.hi	.Lover16_proc
	/* All of the stores below may be unaligned. */
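	/*
	 * count is at most 15: test bits 3/2/1/0 of count and store 8, 4, 2
	 * and 1 bytes for each bit that is set.
	 */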
	tbz	count, #3, 1f
	str	A_l, [dst], #8
1:
	tbz	count, #2, 2f
	str	A_lw, [dst], #4
2:
	tbz	count, #1, 3f
	strh	A_lw, [dst], #2
3:
	tbz	count, #0, 4f
	strb	A_lw, [dst]
4:
	ret

.Lover16_proc:
	/* Check whether the start address is 16-byte aligned. */
	neg	tmp2, dst
	ands	tmp2, tmp2, #15
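	/*
	 * tmp2 = (-dst) & 15 is the number of bytes needed to reach the next
	 * 16-byte boundary (zero if dst is already aligned).
	 */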
	b.eq	.Laligned
/*
 * The count is at least 16, so we can use an stp to store the first 16 bytes,
 * then advance dst to the next 16-byte boundary. From this point on the
 * current destination address is aligned.
 */
	stp	A_l, A_l, [dst]	/* possibly unaligned store */
	/* make dst 16-byte aligned */
	sub	count, count, tmp2
	add	dst, dst, tmp2

.Laligned:
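	/* Only a fill value of zero can use the DC ZVA block-zeroing path. */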
	cbz	A_l, .Lzero_mem

.Ltail_maybe_long:
	cmp	count, #64
	b.ge	.Lnot_short
.Ltail63:
	ands	tmp1, count, #0x30
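	/*
	 * tmp1 = count & 0x30 is the number of whole 16-byte chunks left
	 * (0 to 3); the compares below fall through the right number of stps.
	 */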
	b.eq	3f
	cmp	tmp1w, #0x20
	b.eq	1f
	b.lt	2f
	stp	A_l, A_l, [dst], #16
1:
	stp	A_l, A_l, [dst], #16
2:
	stp	A_l, A_l, [dst], #16
/*
 * Fewer than 16 bytes remain, so use an stp to write the last 16 bytes.
 * This writes some bytes twice and the access may be unaligned.
 */
3:
	ands	count, count, #15
	cbz	count, 4f
	add	dst, dst, count
	stp	A_l, A_l, [dst, #-16]	/* Repeat some/all of last store. */
4:
	ret

	/*
	 * Critical loop. Start at a new cache line boundary. Assuming
	 * 64 bytes per line, this ensures the entire loop is in one line.
	 */
	.p2align	L1_CACHE_SHIFT
.Lnot_short:
	sub	dst, dst, #16	/* Pre-bias. */
	sub	count, count, #64
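	/*
	 * dst is pre-biased by 16 and count by 64 so that the loop below can
	 * store 64 bytes per iteration with a single pre-indexed writeback
	 * ([dst, #64]!) and exit as soon as fewer than 64 bytes remain.
	 */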
1:
	stp	A_l, A_l, [dst, #16]
	stp	A_l, A_l, [dst, #32]
	stp	A_l, A_l, [dst, #48]
	stp	A_l, A_l, [dst, #64]!
	subs	count, count, #64
	b.ge	1b
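	/* Undo the pre-bias and handle any 1-63 byte tail. */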
	tst	count, #0x3f
	add	dst, dst, #16
	b.ne	.Ltail63
.Lexitfunc:
	ret

	/*
	 * For zeroing memory, check to see if we can use the ZVA feature to
	 * zero entire 'cache' lines.
	 */
.Lzero_mem:
	cmp	count, #63
	b.le	.Ltail63
	/*
	 * For zeroing small amounts of memory, it's not worth setting up
	 * the line-clear code.
	 */
	cmp	count, #128
	b.lt	.Lnot_short	/* fall through only when count >= 128 */

	mrs	tmp1, dczid_el0
	tbnz	tmp1, #4, .Lnot_short
	mov	tmp3w, #4
	and	zva_len, tmp1w, #15	/* Safety: other bits reserved.  */
	lsl	zva_len, tmp3w, zva_len
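	/*
	 * DCZID_EL0 bit 4 (DZP) set means DC ZVA is prohibited; bits [3:0]
	 * (BS) give log2 of the block size in 4-byte words, so
	 * zva_len = 4 << BS is the ZVA block size in bytes.
	 */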

	ands	tmp3w, zva_len, #63
	/*
	 * Ensure zva_len is at least 64: it is not worth using DC ZVA
	 * if the block size is smaller than 64 bytes.
	 */
	b.ne	.Lnot_short
.Lzero_by_line:
	/*
	 * Compute how far we need to go to become suitably aligned. We're
	 * already at quad-word alignment.
	 */
	cmp	count, zva_len_x
	b.lt	.Lnot_short		/* Not enough to reach alignment.  */
	sub	zva_bits_x, zva_len_x, #1
	neg	tmp2, dst
	ands	tmp2, tmp2, zva_bits_x
	b.eq	2f			/* Already aligned.  */
	/* Not aligned; check that there's enough to copy after alignment. */
	sub	tmp1, count, tmp2
	/*
	 * Guarantee that the length left for ZVA after alignment is at least
	 * 64 bytes (and at least one whole block), so the aligning stores
	 * before label 2 cannot run past the end of the buffer.
	 */
	cmp	tmp1, #64
	ccmp	tmp1, zva_len_x, #8, ge	/* NZCV=0b1000 */
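	/*
	 * If the cmp above found tmp1 >= 64, ccmp compares tmp1 with
	 * zva_len_x; otherwise it forces NZCV to 0b1000 (N set) so that the
	 * b.lt below is always taken.
	 */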
	b.lt	.Lnot_short
	/*
	 * We know that there are at least 64 bytes to zero and that it is
	 * safe to overrun by 64 bytes.
	 */
	mov	count, tmp1
1:
	stp	A_l, A_l, [dst]
	stp	A_l, A_l, [dst, #16]
	stp	A_l, A_l, [dst, #32]
	subs	tmp2, tmp2, #64
	stp	A_l, A_l, [dst, #48]
	add	dst, dst, #64
	b.ge	1b
	/* We've overrun a bit, so adjust dst downwards. */
	add	dst, dst, tmp2
2:
	sub	count, count, zva_len_x
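	/*
	 * count is pre-biased by one block so that the subs/b.ge below keeps
	 * looping only while a full ZVA block is still left to zero.
	 */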
3:
	dc	zva, dst
	add	dst, dst, zva_len_x
	subs	count, count, zva_len_x
	b.ge	3b
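	/*
	 * count is now negative; masking with zva_len - 1 recovers the
	 * sub-block tail, which may still be 64 bytes or more, hence the
	 * branch back to .Ltail_maybe_long rather than .Ltail63.
	 */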
	ands	count, count, zva_bits_x
	b.ne	.Ltail_maybe_long
	ret
SYM_FUNC_END(__pi_memset)

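/*
 * memset is only a weak alias so that an instrumented implementation (for
 * example KASAN's) can override it; __memset stays exported for callers that
 * need the uninstrumented version.
 */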
SYM_FUNC_ALIAS(__memset, __pi_memset)
EXPORT_SYMBOL(__memset)

SYM_FUNC_ALIAS_WEAK(memset, __pi_memset)
EXPORT_SYMBOL(memset)