v6.2
/* SPDX-License-Identifier: GPL-2.0 */
/* U3memcpy.S: UltraSparc-III optimized memcpy.
 *
 * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
 */

#ifdef __KERNEL__
#include <linux/linkage.h>
#include <asm/visasm.h>
#include <asm/asi.h>
#define GLOBAL_SPARE	%g7
#else
#define ASI_BLK_P 0xf0
#define FPRS_FEF  0x04
#ifdef MEMCPY_DEBUG
#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs; \
		     clr %g1; clr %g2; clr %g3; subcc %g0, %g0, %g0;
#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
#else
#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
#endif
#define GLOBAL_SPARE	%g5
#endif

#ifndef EX_LD
#define EX_LD(x,y)	x
#endif
#ifndef EX_LD_FP
#define EX_LD_FP(x,y)	x
#endif

#ifndef EX_ST
#define EX_ST(x,y)	x
#endif
#ifndef EX_ST_FP
#define EX_ST_FP(x,y)	x
#endif
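
These EX_LD/EX_ST defaults make the second (fixup) argument vanish when the file is assembled as a plain memcpy; the copy_{from,to}_user wrappers that include this file redefine the macros so each load/store gets an exception-table entry pointing at that fixup. A minimal C model of the convention (names here are hypothetical, not kernel API):

#include <stdio.h>

/* EX-style macro: 'op' is the memory access, 'fixup' the recovery code.
 * The plain-memcpy build simply drops the fixup, exactly like the
 * defaults above; an exception-aware build would register it instead. */
#define EX_DEFAULT(op, fixup)	op

int main(void)
{
	int src = 7, dst = 0;

	EX_DEFAULT(dst = src, unused_fixup);	/* expands to just "dst = src" */
	printf("dst = %d\n", dst);
	return 0;
}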

#ifndef LOAD
#define LOAD(type,addr,dest)	type [addr], dest
#endif

#ifndef STORE
#define STORE(type,src,addr)	type src, [addr]
#endif

#ifndef STORE_BLK
#define STORE_BLK(src,addr)	stda src, [addr] ASI_BLK_P
#endif

#ifndef FUNC_NAME
#define FUNC_NAME	U3memcpy
#endif

#ifndef PREAMBLE
#define PREAMBLE
#endif

#ifndef XCC
#define XCC xcc
#endif

	.register	%g2,#scratch
	.register	%g3,#scratch

	/* Special/non-trivial issues of this code:
	 *
	 * 1) %o5 is preserved from VISEntryHalf to VISExitHalf
	 * 2) Only low 32 FPU registers are used so that only the
	 *    lower half of the FPU register set is dirtied by this
	 *    code.  This is especially important in the kernel.
	 * 3) This code never prefetches cachelines past the end
	 *    of the source buffer.
	 */

	.text
#ifndef EX_RETVAL
#define EX_RETVAL(x)	x
__restore_fp:
	VISExitHalf
	retl
	 nop
ENTRY(U3_retl_o2_plus_g2_plus_g1_plus_1_fp)
	add	%g1, 1, %g1
	add	%g2, %g1, %g2
	ba,pt	%xcc, __restore_fp
	 add	%o2, %g2, %o0
ENDPROC(U3_retl_o2_plus_g2_plus_g1_plus_1_fp)
ENTRY(U3_retl_o2_plus_g2_fp)
	ba,pt	%xcc, __restore_fp
	 add	%o2, %g2, %o0
ENDPROC(U3_retl_o2_plus_g2_fp)
ENTRY(U3_retl_o2_plus_g2_plus_8_fp)
	add	%g2, 8, %g2
	ba,pt	%xcc, __restore_fp
	 add	%o2, %g2, %o0
ENDPROC(U3_retl_o2_plus_g2_plus_8_fp)
ENTRY(U3_retl_o2)
	retl
	 mov	%o2, %o0
ENDPROC(U3_retl_o2)
ENTRY(U3_retl_o2_plus_1)
	retl
	 add	%o2, 1, %o0
ENDPROC(U3_retl_o2_plus_1)
ENTRY(U3_retl_o2_plus_4)
	retl
	 add	%o2, 4, %o0
ENDPROC(U3_retl_o2_plus_4)
ENTRY(U3_retl_o2_plus_8)
	retl
	 add	%o2, 8, %o0
ENDPROC(U3_retl_o2_plus_8)
ENTRY(U3_retl_o2_plus_g1_plus_1)
	add	%g1, 1, %g1
	retl
	 add	%o2, %g1, %o0
ENDPROC(U3_retl_o2_plus_g1_plus_1)
ENTRY(U3_retl_o2_fp)
	ba,pt	%xcc, __restore_fp
	 mov	%o2, %o0
ENDPROC(U3_retl_o2_fp)
ENTRY(U3_retl_o2_plus_o3_sll_6_plus_0x80_fp)
	sll	%o3, 6, %o3
	add	%o3, 0x80, %o3
	ba,pt	%xcc, __restore_fp
	 add	%o2, %o3, %o0
ENDPROC(U3_retl_o2_plus_o3_sll_6_plus_0x80_fp)
ENTRY(U3_retl_o2_plus_o3_sll_6_plus_0x40_fp)
	sll	%o3, 6, %o3
	add	%o3, 0x40, %o3
	ba,pt	%xcc, __restore_fp
	 add	%o2, %o3, %o0
ENDPROC(U3_retl_o2_plus_o3_sll_6_plus_0x40_fp)
ENTRY(U3_retl_o2_plus_GS_plus_0x10)
	add	GLOBAL_SPARE, 0x10, GLOBAL_SPARE
	retl
	 add	%o2, GLOBAL_SPARE, %o0
ENDPROC(U3_retl_o2_plus_GS_plus_0x10)
ENTRY(U3_retl_o2_plus_GS_plus_0x08)
	add	GLOBAL_SPARE, 0x08, GLOBAL_SPARE
	retl
	 add	%o2, GLOBAL_SPARE, %o0
ENDPROC(U3_retl_o2_plus_GS_plus_0x08)
ENTRY(U3_retl_o2_and_7_plus_GS)
	and	%o2, 7, %o2
	retl
	 add	%o2, GLOBAL_SPARE, %o0
ENDPROC(U3_retl_o2_and_7_plus_GS)
ENTRY(U3_retl_o2_and_7_plus_GS_plus_8)
	add	GLOBAL_SPARE, 8, GLOBAL_SPARE
	and	%o2, 7, %o2
	retl
	 add	%o2, GLOBAL_SPARE, %o0
ENDPROC(U3_retl_o2_and_7_plus_GS_plus_8)
#endif
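
Each U3_retl_* stub above implements the copy_{from,to}_user contract: on a fault it returns, in %o0, the number of bytes that were not copied, reconstructed from the loop state (%o2 plus whatever adjustment the label name encodes). The _fp variants additionally fall through __restore_fp to undo VISEntryHalf. A runnable C sketch of the arithmetic for one stub:

#include <stdio.h>
#include <stddef.h>

/* Model of U3_retl_o2_plus_g2_plus_8: %o2 holds the tail bytes not yet
 * handled, %g2 the bytes still owed to the 8-byte loop, and +8 covers
 * the chunk that faulted mid-flight. */
static size_t u3_retl_o2_plus_g2_plus_8(size_t o2, size_t g2)
{
	return o2 + (g2 + 8);
}

int main(void)
{
	/* hypothetical fault: 5 tail bytes and 16 loop bytes outstanding */
	printf("not copied: %zu\n", u3_retl_o2_plus_g2_plus_8(5, 16));
	return 0;
}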

	.align		64

	/* The cheetah's flexible spine, oversized liver, enlarged heart,
	 * slender muscular body, and claws make it the swiftest hunter
	 * in Africa and the fastest animal on land.  Can reach speeds
	 * of up to 2.4GB per second.
	 */

	.globl	FUNC_NAME
	.type	FUNC_NAME,#function
FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */
	srlx		%o2, 31, %g2
	cmp		%g2, 0

	/* software trap 5 "Range Check" if len >= 0x80000000 */
	tne		%xcc, 5
	PREAMBLE
	mov		%o0, %o4

	/* if len == 0 */
	cmp		%o2, 0
	be,pn		%XCC, end_return
	 or		%o0, %o1, %o3

	/* if len < 16 */
	cmp		%o2, 16
	blu,a,pn	%XCC, less_than_16
	 or		%o3, %o2, %o3

	/* if len < 192 */
	cmp		%o2, (3 * 64)
	blu,pt		%XCC, less_than_192
	 andcc		%o3, 0x7, %g0
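
The prologue above is a size dispatch; note how %o3 accumulates dst|src and then dst|src|len, so one test of its low three bits later answers whether destination, source, and length are all 8-byte multiples. A C sketch of the control flow, with hypothetical helper names standing in for the branch targets:

#include <stddef.h>
#include <stdint.h>

/* Hypothetical stand-ins for the three strategies this function jumps to. */
static void *copy_lt16(void *d, const void *s, size_t n)          { (void)s; (void)n; return d; }
static void *copy_lt192(void *d, const void *s, size_t n, int a8) { (void)s; (void)n; (void)a8; return d; }
static void *vis_block_copy(void *d, const void *s, size_t n)     { (void)s; (void)n; return d; }

void *u3memcpy_dispatch(void *dst, const void *src, size_t len)
{
	/* %o3 accumulates dst|src, then dst|src|len */
	uintptr_t o3 = (uintptr_t)dst | (uintptr_t)src | len;

	if (len == 0)
		return dst;				/* be,pn %XCC, end_return */
	if (len < 16)
		return copy_lt16(dst, src, len);	/* blu,a,pn %XCC, less_than_16 */
	if (len < 3 * 64)
		return copy_lt192(dst, src, len, (o3 & 0x7) == 0);
	return vis_block_copy(dst, src, len);		/* VISEntryHalf path */
}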

	/* Clobbers o5/g1/g2/g3/g7/icc/xcc.  We must preserve
	 * o5 from here until we hit VISExitHalf.
	 */
	VISEntryHalf

	/* Is 'dst' already aligned on a 64-byte boundary? */
	andcc		%o0, 0x3f, %g2
	be,pt		%XCC, 2f

	/* Compute abs((dst & 0x3f) - 0x40) into %g2.  This is the number
	 * of bytes to copy to make 'dst' 64-byte aligned.  We pre-
	 * subtract this from 'len'.
	 */
	 sub		%o0, %o1, GLOBAL_SPARE
	sub		%g2, 0x40, %g2
	sub		%g0, %g2, %g2
	sub		%o2, %g2, %o2
	andcc		%g2, 0x7, %g1
	be,pt		%icc, 2f
	 and		%g2, 0x38, %g2

1:	subcc		%g1, 0x1, %g1
	EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3), U3_retl_o2_plus_g2_plus_g1_plus_1_fp)
	EX_ST_FP(STORE(stb, %o3, %o1 + GLOBAL_SPARE), U3_retl_o2_plus_g2_plus_g1_plus_1_fp)
	bgu,pt		%XCC, 1b
	 add		%o1, 0x1, %o1

	add		%o1, GLOBAL_SPARE, %o0

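A worked example of the head-alignment arithmetic above, with a hypothetical unaligned dst:

#include <stdint.h>
#include <stdio.h>

/* With dst not 64-byte aligned, head = 0x40 - (dst & 0x3f) bytes bring
 * it to a boundary; the low 3 bits go through the byte loop, the rest
 * through the 8-byte faligndata loop. */
int main(void)
{
	uintptr_t dst = 0x10017;		/* hypothetical unaligned dst */
	unsigned g2 = (unsigned)(dst & 0x3f);	/* 0x17 */

	g2 = 0x40 - g2;				/* sub then negate: 0x29 = 41 */
	printf("head=%u bytes: %u single bytes, %u via the 8-byte loop\n",
	       g2, g2 & 0x7, g2 & 0x38);	/* 41 = 1 + 40 */
	return 0;
}
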
2:	cmp		%g2, 0x0
	and		%o1, 0x7, %g1
	be,pt		%icc, 3f
	 alignaddr	%o1, %g0, %o1

	EX_LD_FP(LOAD(ldd, %o1, %f4), U3_retl_o2_plus_g2_fp)
1:	EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6), U3_retl_o2_plus_g2_fp)
	add		%o1, 0x8, %o1
	subcc		%g2, 0x8, %g2
	faligndata	%f4, %f6, %f0
	EX_ST_FP(STORE(std, %f0, %o0), U3_retl_o2_plus_g2_plus_8_fp)
	be,pn		%icc, 3f
	 add		%o0, 0x8, %o0

	EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4), U3_retl_o2_plus_g2_fp)
	add		%o1, 0x8, %o1
	subcc		%g2, 0x8, %g2
	faligndata	%f6, %f4, %f2
	EX_ST_FP(STORE(std, %f2, %o0), U3_retl_o2_plus_g2_plus_8_fp)
	bne,pt		%icc, 1b
	 add		%o0, 0x8, %o0

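The loop above is the classic VIS misaligned-copy idiom: alignaddr latches src & 7 and rounds %o1 down, and each faligndata extracts one aligned 8-byte result from a pair of adjacent doublewords. A rough C model, assuming big-endian byte numbering as on SPARC:

#include <stdint.h>
#include <stdio.h>

/* faligndata picks 8 bytes out of the 16-byte pair hi:lo, starting at
 * the byte offset recorded by alignaddr. */
static uint64_t faligndata_model(uint64_t hi, uint64_t lo, unsigned off)
{
	if (off == 0)
		return hi;
	return (hi << (8 * off)) | (lo >> (8 * (8 - off)));
}

int main(void)
{
	/* bytes 00..0f; offset 3 selects bytes 03..0a */
	uint64_t out = faligndata_model(0x0001020304050607ULL,
					0x08090a0b0c0d0e0fULL, 3);
	printf("%016llx\n", (unsigned long long)out);	/* 030405060708090a */
	return 0;
}
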
3:	LOAD(prefetch, %o1 + 0x000, #one_read)
	LOAD(prefetch, %o1 + 0x040, #one_read)
	andn		%o2, (0x40 - 1), GLOBAL_SPARE
	LOAD(prefetch, %o1 + 0x080, #one_read)
	LOAD(prefetch, %o1 + 0x0c0, #one_read)
	LOAD(prefetch, %o1 + 0x100, #one_read)
	EX_LD_FP(LOAD(ldd, %o1 + 0x000, %f0), U3_retl_o2_fp)
	LOAD(prefetch, %o1 + 0x140, #one_read)
	EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2), U3_retl_o2_fp)
	LOAD(prefetch, %o1 + 0x180, #one_read)
	EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4), U3_retl_o2_fp)
	LOAD(prefetch, %o1 + 0x1c0, #one_read)
	faligndata	%f0, %f2, %f16
	EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6), U3_retl_o2_fp)
	faligndata	%f2, %f4, %f18
	EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8), U3_retl_o2_fp)
	faligndata	%f4, %f6, %f20
	EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10), U3_retl_o2_fp)
	faligndata	%f6, %f8, %f22

	EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12), U3_retl_o2_fp)
	faligndata	%f8, %f10, %f24
	EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14), U3_retl_o2_fp)
	faligndata	%f10, %f12, %f26
	EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0), U3_retl_o2_fp)

	subcc		GLOBAL_SPARE, 0x80, GLOBAL_SPARE
	add		%o1, 0x40, %o1
	bgu,pt		%XCC, 1f
	 srl		GLOBAL_SPARE, 6, %o3
	ba,pt		%xcc, 2f
	 nop

	.align		64
1:
	EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2), U3_retl_o2_plus_o3_sll_6_plus_0x80_fp)
	faligndata	%f12, %f14, %f28
	EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4), U3_retl_o2_plus_o3_sll_6_plus_0x80_fp)
	faligndata	%f14, %f0, %f30
	EX_ST_FP(STORE_BLK(%f16, %o0), U3_retl_o2_plus_o3_sll_6_plus_0x80_fp)
	EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6), U3_retl_o2_plus_o3_sll_6_plus_0x40_fp)
	faligndata	%f0, %f2, %f16
	add		%o0, 0x40, %o0

	EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8), U3_retl_o2_plus_o3_sll_6_plus_0x40_fp)
	faligndata	%f2, %f4, %f18
	EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10), U3_retl_o2_plus_o3_sll_6_plus_0x40_fp)
	faligndata	%f4, %f6, %f20
	EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12), U3_retl_o2_plus_o3_sll_6_plus_0x40_fp)
	subcc		%o3, 0x01, %o3
	faligndata	%f6, %f8, %f22
	EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14), U3_retl_o2_plus_o3_sll_6_plus_0x80_fp)

	faligndata	%f8, %f10, %f24
	EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0), U3_retl_o2_plus_o3_sll_6_plus_0x80_fp)
	LOAD(prefetch, %o1 + 0x1c0, #one_read)
	faligndata	%f10, %f12, %f26
	bg,pt		%XCC, 1b
	 add		%o1, 0x40, %o1

	/* Finally we copy the last full 64-byte block. */
2:
	EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2), U3_retl_o2_plus_o3_sll_6_plus_0x80_fp)
	faligndata	%f12, %f14, %f28
	EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4), U3_retl_o2_plus_o3_sll_6_plus_0x80_fp)
	faligndata	%f14, %f0, %f30
	EX_ST_FP(STORE_BLK(%f16, %o0), U3_retl_o2_plus_o3_sll_6_plus_0x80_fp)
	EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6), U3_retl_o2_plus_o3_sll_6_plus_0x40_fp)
	faligndata	%f0, %f2, %f16
	EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8), U3_retl_o2_plus_o3_sll_6_plus_0x40_fp)
	faligndata	%f2, %f4, %f18
	EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10), U3_retl_o2_plus_o3_sll_6_plus_0x40_fp)
	faligndata	%f4, %f6, %f20
	EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12), U3_retl_o2_plus_o3_sll_6_plus_0x40_fp)
	faligndata	%f6, %f8, %f22
	EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14), U3_retl_o2_plus_o3_sll_6_plus_0x40_fp)
	faligndata	%f8, %f10, %f24
	cmp		%g1, 0
	be,pt		%XCC, 1f
	 add		%o0, 0x40, %o0
	EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0), U3_retl_o2_plus_o3_sll_6_plus_0x40_fp)
1:	faligndata	%f10, %f12, %f26
	faligndata	%f12, %f14, %f28
	faligndata	%f14, %f0, %f30
	EX_ST_FP(STORE_BLK(%f16, %o0), U3_retl_o2_plus_o3_sll_6_plus_0x40_fp)
	add		%o0, 0x40, %o0
	add		%o1, 0x40, %o1
	membar		#Sync
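
The main loop is software-pipelined: while the 64 bytes of block N sit in %f16-%f30 and are committed with a single stda to ASI_BLK_P, the ldd's for block N+1 are already issued, and GLOBAL_SPARE is pre-decremented by 0x80 for the two blocks in flight. A loose C skeleton of that structure (illustrative only, with memcpy standing in for the VIS loads, faligndata, and the block store):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* 'iters' plays the role of %o3 = GLOBAL_SPARE >> 6 after the 0x80
 * pre-subtraction; two extra blocks (prologue + final "2:") bracket
 * the loop. */
static void block_loop(uint64_t *dst, const uint64_t *src, size_t iters)
{
	uint64_t cur[8], next[8];

	memcpy(cur, src, 64);			/* prologue loads (%f0..%f14) */
	src += 8;
	while (iters--) {
		memcpy(next, src, 64);		/* loads for block N+1 */
		src += 8;
		memcpy(dst, cur, 64);		/* stda ... ASI_BLK_P for block N */
		dst += 8;
		memcpy(cur, next, 64);
	}
	memcpy(dst, cur, 64);			/* final full block ("2:") */
}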

	/* Now we copy the (len modulo 64) bytes at the end.
	 * Note how we borrow the %f0 loaded above.
	 *
	 * Also notice how this code is careful not to perform a
	 * load past the end of the src buffer.
	 */
	and		%o2, 0x3f, %o2
	andcc		%o2, 0x38, %g2
	be,pn		%XCC, 2f
	 subcc		%g2, 0x8, %g2
	be,pn		%XCC, 2f
	 cmp		%g1, 0

	sub		%o2, %g2, %o2
	be,a,pt		%XCC, 1f
	 EX_LD_FP(LOAD(ldd, %o1 + 0x00, %f0), U3_retl_o2_plus_g2_fp)

1:	EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f2), U3_retl_o2_plus_g2_fp)
	add		%o1, 0x8, %o1
	subcc		%g2, 0x8, %g2
	faligndata	%f0, %f2, %f8
	EX_ST_FP(STORE(std, %f8, %o0), U3_retl_o2_plus_g2_plus_8_fp)
	be,pn		%XCC, 2f
	 add		%o0, 0x8, %o0
	EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f0), U3_retl_o2_plus_g2_fp)
	add		%o1, 0x8, %o1
	subcc		%g2, 0x8, %g2
	faligndata	%f2, %f0, %f8
	EX_ST_FP(STORE(std, %f8, %o0), U3_retl_o2_plus_g2_plus_8_fp)
	bne,pn		%XCC, 1b
	 add		%o0, 0x8, %o0

	/* If anything is left, we copy it one byte at a time.
	 * Note that %g1 is (src & 0x7) saved above before the
	 * alignaddr was performed.
	 */
2:
	cmp		%o2, 0
	add		%o1, %g1, %o1
	VISExitHalf
	be,pn		%XCC, end_return
	 sub		%o0, %o1, %o3

	andcc		%g1, 0x7, %g0
	bne,pn		%icc, 90f
	 andcc		%o2, 0x8, %g0
	be,pt		%icc, 1f
	 nop
	EX_LD(LOAD(ldx, %o1, %o5), U3_retl_o2)
	EX_ST(STORE(stx, %o5, %o1 + %o3), U3_retl_o2)
	add		%o1, 0x8, %o1
	sub		%o2, 8, %o2

1:	andcc		%o2, 0x4, %g0
	be,pt		%icc, 1f
	 nop
	EX_LD(LOAD(lduw, %o1, %o5), U3_retl_o2)
	EX_ST(STORE(stw, %o5, %o1 + %o3), U3_retl_o2)
	add		%o1, 0x4, %o1
	sub		%o2, 4, %o2

1:	andcc		%o2, 0x2, %g0
	be,pt		%icc, 1f
	 nop
	EX_LD(LOAD(lduh, %o1, %o5), U3_retl_o2)
	EX_ST(STORE(sth, %o5, %o1 + %o3), U3_retl_o2)
	add		%o1, 0x2, %o1
	sub		%o2, 2, %o2

1:	andcc		%o2, 0x1, %g0
	be,pt		%icc, end_return
	 nop
	EX_LD(LOAD(ldub, %o1, %o5), U3_retl_o2)
	ba,pt		%xcc, end_return
	 EX_ST(STORE(stb, %o5, %o1 + %o3), U3_retl_o2)

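With source and destination now 8-byte aligned, the ladder above peels the remaining bytes by testing individual bits of len, largest chunk first. A C sketch of the same peeling order:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* len < 16 here; each set bit of len selects one copy, mirroring the
 * andcc 0x8/0x4/0x2/0x1 ladder above. */
static void copy_tail(uint8_t *dst, const uint8_t *src, size_t len)
{
	if (len & 8) { memcpy(dst, src, 8); dst += 8; src += 8; }
	if (len & 4) { memcpy(dst, src, 4); dst += 4; src += 4; }
	if (len & 2) { memcpy(dst, src, 2); dst += 2; src += 2; }
	if (len & 1) { *dst = *src; }
}
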
	.align		64
	/* 16 <= len < 192 */
less_than_192:
	bne,pn		%XCC, 75f
	 sub		%o0, %o1, %o3

72:
	andn		%o2, 0xf, GLOBAL_SPARE
	and		%o2, 0xf, %o2
1:	subcc		GLOBAL_SPARE, 0x10, GLOBAL_SPARE
	EX_LD(LOAD(ldx, %o1 + 0x00, %o5), U3_retl_o2_plus_GS_plus_0x10)
	EX_LD(LOAD(ldx, %o1 + 0x08, %g1), U3_retl_o2_plus_GS_plus_0x10)
	EX_ST(STORE(stx, %o5, %o1 + %o3), U3_retl_o2_plus_GS_plus_0x10)
	add		%o1, 0x8, %o1
	EX_ST(STORE(stx, %g1, %o1 + %o3), U3_retl_o2_plus_GS_plus_0x08)
	bgu,pt		%XCC, 1b
	 add		%o1, 0x8, %o1
73:	andcc		%o2, 0x8, %g0
	be,pt		%XCC, 1f
	 nop
	sub		%o2, 0x8, %o2
	EX_LD(LOAD(ldx, %o1, %o5), U3_retl_o2_plus_8)
	EX_ST(STORE(stx, %o5, %o1 + %o3), U3_retl_o2_plus_8)
	add		%o1, 0x8, %o1
1:	andcc		%o2, 0x4, %g0
	be,pt		%XCC, 1f
	 nop
	sub		%o2, 0x4, %o2
	EX_LD(LOAD(lduw, %o1, %o5), U3_retl_o2_plus_4)
	EX_ST(STORE(stw, %o5, %o1 + %o3), U3_retl_o2_plus_4)
	add		%o1, 0x4, %o1
1:	cmp		%o2, 0
	be,pt		%XCC, end_return
	 nop
	ba,pt		%xcc, 90f
	 nop

75:
	andcc		%o0, 0x7, %g1
	sub		%g1, 0x8, %g1
	be,pn		%icc, 2f
	 sub		%g0, %g1, %g1
	sub		%o2, %g1, %o2

1:	subcc		%g1, 1, %g1
	EX_LD(LOAD(ldub, %o1, %o5), U3_retl_o2_plus_g1_plus_1)
	EX_ST(STORE(stb, %o5, %o1 + %o3), U3_retl_o2_plus_g1_plus_1)
	bgu,pt		%icc, 1b
	 add		%o1, 1, %o1

2:	add		%o1, %o3, %o0
	andcc		%o1, 0x7, %g1
	bne,pt		%icc, 8f
	 sll		%g1, 3, %g1

	cmp		%o2, 16
	bgeu,pt		%icc, 72b
	 nop
	ba,a,pt		%xcc, 73b

8:	mov		64, %o3
	andn		%o1, 0x7, %o1
	EX_LD(LOAD(ldx, %o1, %g2), U3_retl_o2)
	sub		%o3, %g1, %o3
	andn		%o2, 0x7, GLOBAL_SPARE
	sllx		%g2, %g1, %g2
1:	EX_LD(LOAD(ldx, %o1 + 0x8, %g3), U3_retl_o2_and_7_plus_GS)
	subcc		GLOBAL_SPARE, 0x8, GLOBAL_SPARE
	add		%o1, 0x8, %o1
	srlx		%g3, %o3, %o5
	or		%o5, %g2, %o5
	EX_ST(STORE(stx, %o5, %o0), U3_retl_o2_and_7_plus_GS_plus_8)
	add		%o0, 0x8, %o0
	bgu,pt		%icc, 1b
	 sllx		%g3, %g1, %g2

	srl		%g1, 3, %g1
	andcc		%o2, 0x7, %o2
	be,pn		%icc, end_return
	 add		%o1, %g1, %o1
	ba,pt		%xcc, 90f
	 sub		%o0, %o1, %o3
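
The 8f loop above handles a source that is still misaligned after the destination was aligned: each output doubleword is assembled from two adjacent aligned loads with sllx/srlx. A C model, assuming big-endian byte order; the path is only entered when src & 7 is nonzero, so both shift amounts stay in range:

#include <stddef.h>
#include <stdint.h>

/* l mirrors %g1 (misalignment in bits), r mirrors %o3 = 64 - %g1;
 * prev/cur mirror %g2/%g3. */
static void shift_merge(uint64_t *dst, uintptr_t src, size_t words)
{
	unsigned l = 8 * (unsigned)(src & 7);	/* nonzero on this path */
	unsigned r = 64 - l;
	const uint64_t *p = (const uint64_t *)(src & ~(uintptr_t)7);
	uint64_t prev = *p++;			/* first aligned word */

	while (words--) {
		uint64_t cur = *p++;		/* next aligned word */
		*dst++ = (prev << l) | (cur >> r);
		prev = cur;
	}
	/* the leftover len & 7 bytes fall through to the byte loop at 90 */
}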

	.align		64
	/* 0 < len < 16 */
less_than_16:
	andcc		%o3, 0x3, %g0
	bne,pn		%XCC, 90f
	 sub		%o0, %o1, %o3

1:
	subcc		%o2, 4, %o2
	EX_LD(LOAD(lduw, %o1, %g1), U3_retl_o2_plus_4)
	EX_ST(STORE(stw, %g1, %o1 + %o3), U3_retl_o2_plus_4)
	bgu,pt		%XCC, 1b
	 add		%o1, 4, %o1

end_return:
	retl
	 mov		EX_RETVAL(%o4), %o0

	.align		32
90:
	subcc		%o2, 1, %o2
	EX_LD(LOAD(ldub, %o1, %g1), U3_retl_o2_plus_1)
	EX_ST(STORE(stb, %g1, %o1 + %o3), U3_retl_o2_plus_1)
	bgu,pt		%XCC, 90b
	 add		%o1, 1, %o1
	retl
	 mov		EX_RETVAL(%o4), %o0

	.size		FUNC_NAME, .-FUNC_NAME
v4.6
 
/* U3memcpy.S: UltraSparc-III optimized memcpy.
 *
 * Copyright (C) 1999, 2000, 2004 David S. Miller (davem@redhat.com)
 */

#ifdef __KERNEL__
#include <asm/visasm.h>
#include <asm/asi.h>
#define GLOBAL_SPARE	%g7
#else
#define ASI_BLK_P 0xf0
#define FPRS_FEF  0x04
#ifdef MEMCPY_DEBUG
#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs; \
		     clr %g1; clr %g2; clr %g3; subcc %g0, %g0, %g0;
#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
#else
#define VISEntryHalf rd %fprs, %o5; wr %g0, FPRS_FEF, %fprs
#define VISExitHalf and %o5, FPRS_FEF, %o5; wr %o5, 0x0, %fprs
#endif
#define GLOBAL_SPARE	%g5
#endif

#ifndef EX_LD
#define EX_LD(x)	x
#endif
#ifndef EX_LD_FP
#define EX_LD_FP(x)	x
#endif

#ifndef EX_ST
#define EX_ST(x)	x
#endif
#ifndef EX_ST_FP
#define EX_ST_FP(x)	x
#endif

#ifndef EX_RETVAL
#define EX_RETVAL(x)	x
#endif

#ifndef LOAD
#define LOAD(type,addr,dest)	type [addr], dest
#endif

#ifndef STORE
#define STORE(type,src,addr)	type src, [addr]
#endif

#ifndef STORE_BLK
#define STORE_BLK(src,addr)	stda src, [addr] ASI_BLK_P
#endif

#ifndef FUNC_NAME
#define FUNC_NAME	U3memcpy
#endif

#ifndef PREAMBLE
#define PREAMBLE
#endif

#ifndef XCC
#define XCC xcc
#endif

	.register	%g2,#scratch
	.register	%g3,#scratch

	/* Special/non-trivial issues of this code:
	 *
	 * 1) %o5 is preserved from VISEntryHalf to VISExitHalf
	 * 2) Only low 32 FPU registers are used so that only the
	 *    lower half of the FPU register set is dirtied by this
	 *    code.  This is especially important in the kernel.
	 * 3) This code never prefetches cachelines past the end
	 *    of the source buffer.
	 */

	.text
	.align		64

	/* The cheetah's flexible spine, oversized liver, enlarged heart,
	 * slender muscular body, and claws make it the swiftest hunter
	 * in Africa and the fastest animal on land.  Can reach speeds
	 * of up to 2.4GB per second.
	 */

	.globl	FUNC_NAME
	.type	FUNC_NAME,#function
FUNC_NAME:	/* %o0=dst, %o1=src, %o2=len */
	srlx		%o2, 31, %g2
	cmp		%g2, 0
	tne		%xcc, 5
	PREAMBLE
	mov		%o0, %o4
	cmp		%o2, 0
	be,pn		%XCC, 85f
	 or		%o0, %o1, %o3
	cmp		%o2, 16
	blu,a,pn	%XCC, 80f
	 or		%o3, %o2, %o3

	cmp		%o2, (3 * 64)
	blu,pt		%XCC, 70f
	 andcc		%o3, 0x7, %g0

	/* Clobbers o5/g1/g2/g3/g7/icc/xcc.  We must preserve
	 * o5 from here until we hit VISExitHalf.
	 */
	VISEntryHalf

	/* Is 'dst' already aligned on a 64-byte boundary? */
	andcc		%o0, 0x3f, %g2
	be,pt		%XCC, 2f

	/* Compute abs((dst & 0x3f) - 0x40) into %g2.  This is the number
	 * of bytes to copy to make 'dst' 64-byte aligned.  We pre-
	 * subtract this from 'len'.
	 */
	 sub		%o0, %o1, GLOBAL_SPARE
	sub		%g2, 0x40, %g2
	sub		%g0, %g2, %g2
	sub		%o2, %g2, %o2
	andcc		%g2, 0x7, %g1
	be,pt		%icc, 2f
	 and		%g2, 0x38, %g2

1:	subcc		%g1, 0x1, %g1
	EX_LD_FP(LOAD(ldub, %o1 + 0x00, %o3))
	EX_ST_FP(STORE(stb, %o3, %o1 + GLOBAL_SPARE))
	bgu,pt		%XCC, 1b
	 add		%o1, 0x1, %o1

	add		%o1, GLOBAL_SPARE, %o0

2:	cmp		%g2, 0x0
	and		%o1, 0x7, %g1
	be,pt		%icc, 3f
	 alignaddr	%o1, %g0, %o1

	EX_LD_FP(LOAD(ldd, %o1, %f4))
1:	EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f6))
	add		%o1, 0x8, %o1
	subcc		%g2, 0x8, %g2
	faligndata	%f4, %f6, %f0
	EX_ST_FP(STORE(std, %f0, %o0))
	be,pn		%icc, 3f
	 add		%o0, 0x8, %o0

	EX_LD_FP(LOAD(ldd, %o1 + 0x8, %f4))
	add		%o1, 0x8, %o1
	subcc		%g2, 0x8, %g2
	faligndata	%f6, %f4, %f2
	EX_ST_FP(STORE(std, %f2, %o0))
	bne,pt		%icc, 1b
	 add		%o0, 0x8, %o0

3:	LOAD(prefetch, %o1 + 0x000, #one_read)
	LOAD(prefetch, %o1 + 0x040, #one_read)
	andn		%o2, (0x40 - 1), GLOBAL_SPARE
	LOAD(prefetch, %o1 + 0x080, #one_read)
	LOAD(prefetch, %o1 + 0x0c0, #one_read)
	LOAD(prefetch, %o1 + 0x100, #one_read)
	EX_LD_FP(LOAD(ldd, %o1 + 0x000, %f0))
	LOAD(prefetch, %o1 + 0x140, #one_read)
	EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2))
	LOAD(prefetch, %o1 + 0x180, #one_read)
	EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4))
	LOAD(prefetch, %o1 + 0x1c0, #one_read)
	faligndata	%f0, %f2, %f16
	EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6))
	faligndata	%f2, %f4, %f18
	EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8))
	faligndata	%f4, %f6, %f20
	EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10))
	faligndata	%f6, %f8, %f22

	EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12))
	faligndata	%f8, %f10, %f24
	EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14))
	faligndata	%f10, %f12, %f26
	EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0))

	subcc		GLOBAL_SPARE, 0x80, GLOBAL_SPARE
	add		%o1, 0x40, %o1
	bgu,pt		%XCC, 1f
	 srl		GLOBAL_SPARE, 6, %o3
	ba,pt		%xcc, 2f
	 nop

	.align		64
1:
	EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2))
	faligndata	%f12, %f14, %f28
	EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4))
	faligndata	%f14, %f0, %f30
	EX_ST_FP(STORE_BLK(%f16, %o0))
	EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6))
	faligndata	%f0, %f2, %f16
	add		%o0, 0x40, %o0

	EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8))
	faligndata	%f2, %f4, %f18
	EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10))
	faligndata	%f4, %f6, %f20
	EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12))
	subcc		%o3, 0x01, %o3
	faligndata	%f6, %f8, %f22
	EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14))

	faligndata	%f8, %f10, %f24
	EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0))
	LOAD(prefetch, %o1 + 0x1c0, #one_read)
	faligndata	%f10, %f12, %f26
	bg,pt		%XCC, 1b
	 add		%o1, 0x40, %o1

	/* Finally we copy the last full 64-byte block. */
2:
	EX_LD_FP(LOAD(ldd, %o1 + 0x008, %f2))
	faligndata	%f12, %f14, %f28
	EX_LD_FP(LOAD(ldd, %o1 + 0x010, %f4))
	faligndata	%f14, %f0, %f30
	EX_ST_FP(STORE_BLK(%f16, %o0))
	EX_LD_FP(LOAD(ldd, %o1 + 0x018, %f6))
	faligndata	%f0, %f2, %f16
	EX_LD_FP(LOAD(ldd, %o1 + 0x020, %f8))
	faligndata	%f2, %f4, %f18
	EX_LD_FP(LOAD(ldd, %o1 + 0x028, %f10))
	faligndata	%f4, %f6, %f20
	EX_LD_FP(LOAD(ldd, %o1 + 0x030, %f12))
	faligndata	%f6, %f8, %f22
	EX_LD_FP(LOAD(ldd, %o1 + 0x038, %f14))
	faligndata	%f8, %f10, %f24
	cmp		%g1, 0
	be,pt		%XCC, 1f
	 add		%o0, 0x40, %o0
	EX_LD_FP(LOAD(ldd, %o1 + 0x040, %f0))
1:	faligndata	%f10, %f12, %f26
	faligndata	%f12, %f14, %f28
	faligndata	%f14, %f0, %f30
	EX_ST_FP(STORE_BLK(%f16, %o0))
	add		%o0, 0x40, %o0
	add		%o1, 0x40, %o1
	membar		#Sync

	/* Now we copy the (len modulo 64) bytes at the end.
	 * Note how we borrow the %f0 loaded above.
	 *
	 * Also notice how this code is careful not to perform a
	 * load past the end of the src buffer.
	 */
	and		%o2, 0x3f, %o2
	andcc		%o2, 0x38, %g2
	be,pn		%XCC, 2f
	 subcc		%g2, 0x8, %g2
	be,pn		%XCC, 2f
	 cmp		%g1, 0

	sub		%o2, %g2, %o2
	be,a,pt		%XCC, 1f
	 EX_LD_FP(LOAD(ldd, %o1 + 0x00, %f0))

1:	EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f2))
	add		%o1, 0x8, %o1
	subcc		%g2, 0x8, %g2
	faligndata	%f0, %f2, %f8
	EX_ST_FP(STORE(std, %f8, %o0))
	be,pn		%XCC, 2f
	 add		%o0, 0x8, %o0
	EX_LD_FP(LOAD(ldd, %o1 + 0x08, %f0))
	add		%o1, 0x8, %o1
	subcc		%g2, 0x8, %g2
	faligndata	%f2, %f0, %f8
	EX_ST_FP(STORE(std, %f8, %o0))
	bne,pn		%XCC, 1b
	 add		%o0, 0x8, %o0

	/* If anything is left, we copy it one byte at a time.
	 * Note that %g1 is (src & 0x7) saved above before the
	 * alignaddr was performed.
	 */
2:
	cmp		%o2, 0
	add		%o1, %g1, %o1
	VISExitHalf
	be,pn		%XCC, 85f
	 sub		%o0, %o1, %o3

	andcc		%g1, 0x7, %g0
	bne,pn		%icc, 90f
	 andcc		%o2, 0x8, %g0
	be,pt		%icc, 1f
	 nop
	EX_LD(LOAD(ldx, %o1, %o5))
	EX_ST(STORE(stx, %o5, %o1 + %o3))
	add		%o1, 0x8, %o1

1:	andcc		%o2, 0x4, %g0
	be,pt		%icc, 1f
	 nop
	EX_LD(LOAD(lduw, %o1, %o5))
	EX_ST(STORE(stw, %o5, %o1 + %o3))
	add		%o1, 0x4, %o1

1:	andcc		%o2, 0x2, %g0
	be,pt		%icc, 1f
	 nop
	EX_LD(LOAD(lduh, %o1, %o5))
	EX_ST(STORE(sth, %o5, %o1 + %o3))
	add		%o1, 0x2, %o1

1:	andcc		%o2, 0x1, %g0
	be,pt		%icc, 85f
	 nop
	EX_LD(LOAD(ldub, %o1, %o5))
	ba,pt		%xcc, 85f
	 EX_ST(STORE(stb, %o5, %o1 + %o3))

	.align		64
70: /* 16 <= len < 192 */
	bne,pn		%XCC, 75f
	 sub		%o0, %o1, %o3

72:
	andn		%o2, 0xf, GLOBAL_SPARE
	and		%o2, 0xf, %o2
1:	subcc		GLOBAL_SPARE, 0x10, GLOBAL_SPARE
	EX_LD(LOAD(ldx, %o1 + 0x00, %o5))
	EX_LD(LOAD(ldx, %o1 + 0x08, %g1))
	EX_ST(STORE(stx, %o5, %o1 + %o3))
	add		%o1, 0x8, %o1
	EX_ST(STORE(stx, %g1, %o1 + %o3))
	bgu,pt		%XCC, 1b
	 add		%o1, 0x8, %o1
73:	andcc		%o2, 0x8, %g0
	be,pt		%XCC, 1f
	 nop
	sub		%o2, 0x8, %o2
	EX_LD(LOAD(ldx, %o1, %o5))
	EX_ST(STORE(stx, %o5, %o1 + %o3))
	add		%o1, 0x8, %o1
1:	andcc		%o2, 0x4, %g0
	be,pt		%XCC, 1f
	 nop
	sub		%o2, 0x4, %o2
	EX_LD(LOAD(lduw, %o1, %o5))
	EX_ST(STORE(stw, %o5, %o1 + %o3))
	add		%o1, 0x4, %o1
1:	cmp		%o2, 0
	be,pt		%XCC, 85f
	 nop
	ba,pt		%xcc, 90f
	 nop

75:
	andcc		%o0, 0x7, %g1
	sub		%g1, 0x8, %g1
	be,pn		%icc, 2f
	 sub		%g0, %g1, %g1
	sub		%o2, %g1, %o2

1:	subcc		%g1, 1, %g1
	EX_LD(LOAD(ldub, %o1, %o5))
	EX_ST(STORE(stb, %o5, %o1 + %o3))
	bgu,pt		%icc, 1b
	 add		%o1, 1, %o1

2:	add		%o1, %o3, %o0
	andcc		%o1, 0x7, %g1
	bne,pt		%icc, 8f
	 sll		%g1, 3, %g1

	cmp		%o2, 16
	bgeu,pt		%icc, 72b
	 nop
	ba,a,pt		%xcc, 73b

8:	mov		64, %o3
	andn		%o1, 0x7, %o1
	EX_LD(LOAD(ldx, %o1, %g2))
	sub		%o3, %g1, %o3
	andn		%o2, 0x7, GLOBAL_SPARE
	sllx		%g2, %g1, %g2
1:	EX_LD(LOAD(ldx, %o1 + 0x8, %g3))
	subcc		GLOBAL_SPARE, 0x8, GLOBAL_SPARE
	add		%o1, 0x8, %o1
	srlx		%g3, %o3, %o5
	or		%o5, %g2, %o5
	EX_ST(STORE(stx, %o5, %o0))
	add		%o0, 0x8, %o0
	bgu,pt		%icc, 1b
	 sllx		%g3, %g1, %g2

	srl		%g1, 3, %g1
	andcc		%o2, 0x7, %o2
	be,pn		%icc, 85f
	 add		%o1, %g1, %o1
	ba,pt		%xcc, 90f
	 sub		%o0, %o1, %o3

	.align		64
80: /* 0 < len < 16 */
	andcc		%o3, 0x3, %g0
	bne,pn		%XCC, 90f
	 sub		%o0, %o1, %o3

1:
	subcc		%o2, 4, %o2
	EX_LD(LOAD(lduw, %o1, %g1))
	EX_ST(STORE(stw, %g1, %o1 + %o3))
	bgu,pt		%XCC, 1b
	 add		%o1, 4, %o1

85:	retl
	 mov		EX_RETVAL(%o4), %o0

	.align		32
90:
	subcc		%o2, 1, %o2
	EX_LD(LOAD(ldub, %o1, %g1))
	EX_ST(STORE(stb, %g1, %o1 + %o3))
	bgu,pt		%XCC, 90b
	 add		%o1, 1, %o1
	retl
	 mov		EX_RETVAL(%o4), %o0

	.size		FUNC_NAME, .-FUNC_NAME