/* memcpy.S: Sparc optimized memcpy and memmove code
 * Hand optimized from GNU libc's memcpy and memmove
 * Copyright (C) 1991,1996 Free Software Foundation
 * Copyright (C) 1995 Linus Torvalds (Linus.Torvalds@helsinki.fi)
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#define FUNC(x) 	\
	.globl	x;	\
	.type	x,@function; \
	.align	4;	\
x:

/* Both these macros have to start with exactly the same insn */
#define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
	ldd	[%src + (offset) + 0x00], %t0; \
	ldd	[%src + (offset) + 0x08], %t2; \
	ldd	[%src + (offset) + 0x10], %t4; \
	ldd	[%src + (offset) + 0x18], %t6; \
	st	%t0, [%dst + (offset) + 0x00]; \
	st	%t1, [%dst + (offset) + 0x04]; \
	st	%t2, [%dst + (offset) + 0x08]; \
	st	%t3, [%dst + (offset) + 0x0c]; \
	st	%t4, [%dst + (offset) + 0x10]; \
	st	%t5, [%dst + (offset) + 0x14]; \
	st	%t6, [%dst + (offset) + 0x18]; \
	st	%t7, [%dst + (offset) + 0x1c];

#define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
	ldd	[%src + (offset) + 0x00], %t0; \
	ldd	[%src + (offset) + 0x08], %t2; \
	ldd	[%src + (offset) + 0x10], %t4; \
	ldd	[%src + (offset) + 0x18], %t6; \
	std	%t0, [%dst + (offset) + 0x00]; \
	std	%t2, [%dst + (offset) + 0x08]; \
	std	%t4, [%dst + (offset) + 0x10]; \
	std	%t6, [%dst + (offset) + 0x18];

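/* Each of the two macros above copies one 32-byte block at 'offset'
 * through eight 32-bit temporaries; ldd/std move a register pair per
 * 8 bytes.  As a C sketch of the net effect (not part of the build):
 *
 *	int i;
 *	for (i = 0; i < 8; i++)
 *		((unsigned int *)(dst + offset))[i] =
 *			((const unsigned int *)(src + offset))[i];
 */
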
#define MOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
	ldd	[%src - (offset) - 0x10], %t0; \
	ldd	[%src - (offset) - 0x08], %t2; \
	st	%t0, [%dst - (offset) - 0x10]; \
	st	%t1, [%dst - (offset) - 0x0c]; \
	st	%t2, [%dst - (offset) - 0x08]; \
	st	%t3, [%dst - (offset) - 0x04];

#define MOVE_LASTALIGNCHUNK(src, dst, offset, t0, t1, t2, t3) \
	ldd	[%src - (offset) - 0x10], %t0; \
	ldd	[%src - (offset) - 0x08], %t2; \
	std	%t0, [%dst - (offset) - 0x10]; \
	std	%t2, [%dst - (offset) - 0x08];

#define MOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
	ldub	[%src - (offset) - 0x02], %t0; \
	ldub	[%src - (offset) - 0x01], %t1; \
	stb	%t0, [%dst - (offset) - 0x02]; \
	stb	%t1, [%dst - (offset) - 0x01];

/* Both these macros have to start with exactly the same insn */
#define RMOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
	ldd	[%src - (offset) - 0x20], %t0; \
	ldd	[%src - (offset) - 0x18], %t2; \
	ldd	[%src - (offset) - 0x10], %t4; \
	ldd	[%src - (offset) - 0x08], %t6; \
	st	%t0, [%dst - (offset) - 0x20]; \
	st	%t1, [%dst - (offset) - 0x1c]; \
	st	%t2, [%dst - (offset) - 0x18]; \
	st	%t3, [%dst - (offset) - 0x14]; \
	st	%t4, [%dst - (offset) - 0x10]; \
	st	%t5, [%dst - (offset) - 0x0c]; \
	st	%t6, [%dst - (offset) - 0x08]; \
	st	%t7, [%dst - (offset) - 0x04];

#define RMOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
	ldd	[%src - (offset) - 0x20], %t0; \
	ldd	[%src - (offset) - 0x18], %t2; \
	ldd	[%src - (offset) - 0x10], %t4; \
	ldd	[%src - (offset) - 0x08], %t6; \
	std	%t0, [%dst - (offset) - 0x20]; \
	std	%t2, [%dst - (offset) - 0x18]; \
	std	%t4, [%dst - (offset) - 0x10]; \
	std	%t6, [%dst - (offset) - 0x08];

#define RMOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
	ldd	[%src + (offset) + 0x00], %t0; \
	ldd	[%src + (offset) + 0x08], %t2; \
	st	%t0, [%dst + (offset) + 0x00]; \
	st	%t1, [%dst + (offset) + 0x04]; \
	st	%t2, [%dst + (offset) + 0x08]; \
	st	%t3, [%dst + (offset) + 0x0c];

#define RMOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
	ldub	[%src + (offset) + 0x00], %t0; \
	ldub	[%src + (offset) + 0x01], %t1; \
	stb	%t0, [%dst + (offset) + 0x00]; \
	stb	%t1, [%dst + (offset) + 0x01];

#define SMOVE_CHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, prev, shil, shir, offset2) \
	ldd	[%src + (offset) + 0x00], %t0; \
	ldd	[%src + (offset) + 0x08], %t2; \
	srl	%t0, shir, %t5; \
	srl	%t1, shir, %t6; \
	sll	%t0, shil, %t0; \
	or	%t5, %prev, %t5; \
	sll	%t1, shil, %prev; \
	or	%t6, %t0, %t0; \
	srl	%t2, shir, %t1; \
	srl	%t3, shir, %t6; \
	sll	%t2, shil, %t2; \
	or	%t1, %prev, %t1; \
	std	%t4, [%dst + (offset) + (offset2) - 0x04]; \
	std	%t0, [%dst + (offset) + (offset2) + 0x04]; \
	sll	%t3, shil, %prev; \
	or	%t6, %t2, %t4;

#define SMOVE_ALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, prev, shil, shir, offset2) \
	ldd	[%src + (offset) + 0x00], %t0; \
	ldd	[%src + (offset) + 0x08], %t2; \
	srl	%t0, shir, %t4; \
	srl	%t1, shir, %t5; \
	sll	%t0, shil, %t6; \
	or	%t4, %prev, %t0; \
	sll	%t1, shil, %prev; \
	or	%t5, %t6, %t1; \
	srl	%t2, shir, %t4; \
	srl	%t3, shir, %t5; \
	sll	%t2, shil, %t6; \
	or	%t4, %prev, %t2; \
	sll	%t3, shil, %prev; \
	or	%t5, %t6, %t3; \
	std	%t0, [%dst + (offset) + (offset2) + 0x00]; \
	std	%t2, [%dst + (offset) + (offset2) + 0x08];

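/* Note: the RMOVE_* and SMOVE_* macros above are not referenced
 * anywhere below; they appear to be left over from earlier revisions
 * of this code.
 */
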
	.text
	.align	4

0:
	retl
	nop		! Only bcopy returns here and it returns void...

#ifdef __KERNEL__
FUNC(amemmove)
FUNC(__memmove)
#endif
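/* The entry below chooses between the forward copy shared with memcpy
 * and a byte-wise backward copy.  As a rough C sketch (not part of the
 * build; forward_copy is a hypothetical stand-in for the memcpy tail
 * entered at 9f/0f):
 *
 *	char *d = dst; const char *s = src;
 *	if (d <= s || s + n <= d)
 *		return forward_copy(dst, src, n);
 *	while (n--)
 *		d[n] = s[n];
 *	return dst;
 *
 * Note the unsigned pointer comparisons match the bleu branches used.
 */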
FUNC(memmove)
	cmp	%o0, %o1
	mov	%o0, %g7
	bleu	9f
	sub	%o0, %o1, %o4

	add	%o1, %o2, %o3
	cmp	%o3, %o0
	bleu	0f
	andcc	%o4, 3, %o5

	add	%o1, %o2, %o1
	add	%o0, %o2, %o0
	sub	%o1, 1, %o1
	sub	%o0, 1, %o0

1: /* reverse_bytes */

	ldub	[%o1], %o4
	subcc	%o2, 1, %o2
	stb	%o4, [%o0]
	sub	%o1, 1, %o1
	bne	1b
	sub	%o0, 1, %o0

	retl
	mov	%g7, %o0

/* NOTE: This code is executed only for the cases where
   %src (= %o1) & 3 is != 0.
   We need to align it to 4. So, for (%src & 3)
   1 we need to do ldub,lduh
   2 lduh
   3 just ldub
   so even if it looks weird, the branches
   are correct here. -jj
 */
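/* In C terms (a sketch, not part of the build):
 *
 *	switch ((unsigned long)src & 3) {
 *	case 1: copy 1 byte, then 2 bytes; break;
 *	case 2: copy 2 bytes; break;
 *	case 3: copy 1 byte; break;
 *	}
 */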
78: /* dword_align */

	andcc	%o1, 1, %g0
	be	4f
	andcc	%o1, 2, %g0

	ldub	[%o1], %g2
	add	%o1, 1, %o1
	stb	%g2, [%o0]
	sub	%o2, 1, %o2
	bne	3f
	add	%o0, 1, %o0
4:
	lduh	[%o1], %g2
	add	%o1, 2, %o1
	sth	%g2, [%o0]
	sub	%o2, 2, %o2
	b	3f
	add	%o0, 2, %o0

FUNC(memcpy)	/* %o0=dst %o1=src %o2=len */

	sub	%o0, %o1, %o4
	mov	%o0, %g7
9:
	andcc	%o4, 3, %o5
0:
	bne	86f
	cmp	%o2, 15

	bleu	90f
	andcc	%o1, 3, %g0

	bne	78b
3:
	andcc	%o1, 4, %g0

	be	2f
	mov	%o2, %g1

	ld	[%o1], %o4
	sub	%g1, 4, %g1
	st	%o4, [%o0]
	add	%o1, 4, %o1
	add	%o0, 4, %o0
2:
	andcc	%g1, 0xffffff80, %g0
	be	3f
	andcc	%o0, 4, %g0

	be	82f + 4
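	/* (%o0 & 4) == 0 here means dst is 8-byte aligned, so we can
	 * use ldd/std pairs: branch to the second insn of 82f (ldd_std).
	 * The first insn of the MOVE_BIGCHUNK below is then executed in
	 * the branch's delay slot, which only works because
	 * MOVE_BIGCHUNK and MOVE_BIGALIGNCHUNK start with exactly the
	 * same insn (see the comment at their definitions above).
	 */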
5:
	MOVE_BIGCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
	MOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
	MOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
	MOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
	sub	%g1, 128, %g1
	add	%o1, 128, %o1
	cmp	%g1, 128
	bge	5b
	add	%o0, 128, %o0
3:
	andcc	%g1, 0x70, %g4
	be	80f
	andcc	%g1, 8, %g0

	sethi	%hi(80f), %o5
	srl	%g4, 1, %o4
	add	%g4, %o4, %o4
	add	%o1, %g4, %o1
	sub	%o5, %o4, %o5
	jmpl	%o5 + %lo(80f), %g0
	add	%o0, %g4, %o0
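	/* Jump-table dispatch: %g4 holds the remaining whole 16-byte
	 * chunks (%g1 & 0x70).  Each MOVE_LASTCHUNK below expands to
	 * 6 insns (24 bytes) and copies 16 bytes, so the entry point is
	 * 80f - (%g4/16)*24 = 80f - (%g4 + %g4/2), which is what the
	 * srl/add/sub above compute.
	 */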

79: /* memcpy_table */

	MOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g4, g5)
	MOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g4, g5)
	MOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g4, g5)
	MOVE_LASTCHUNK(o1, o0, 0x30, g2, g3, g4, g5)
	MOVE_LASTCHUNK(o1, o0, 0x20, g2, g3, g4, g5)
	MOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g4, g5)
	MOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g4, g5)

80: /* memcpy_table_end */
	be	81f
	andcc	%g1, 4, %g0

	ldd	[%o1], %g2
	add	%o0, 8, %o0
	st	%g2, [%o0 - 0x08]
	add	%o1, 8, %o1
	st	%g3, [%o0 - 0x04]

81: /* memcpy_last7 */

	be	1f
	andcc	%g1, 2, %g0

	ld	[%o1], %g2
	add	%o1, 4, %o1
	st	%g2, [%o0]
	add	%o0, 4, %o0
1:
	be	1f
	andcc	%g1, 1, %g0

	lduh	[%o1], %g2
	add	%o1, 2, %o1
	sth	%g2, [%o0]
	add	%o0, 2, %o0
1:
	be	1f
	nop

	ldub	[%o1], %g2
	stb	%g2, [%o0]
1:
	retl
	mov	%g7, %o0

82: /* ldd_std */
	MOVE_BIGALIGNCHUNK(o1, o0, 0x00, o2, o3, o4, o5, g2, g3, g4, g5)
	MOVE_BIGALIGNCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
	MOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
	MOVE_BIGALIGNCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
	subcc	%g1, 128, %g1
	add	%o1, 128, %o1
	cmp	%g1, 128
	bge	82b
	add	%o0, 128, %o0

	andcc	%g1, 0x70, %g4
	be	84f
	andcc	%g1, 8, %g0

	sethi	%hi(84f), %o5
	add	%o1, %g4, %o1
	sub	%o5, %g4, %o5
	jmpl	%o5 + %lo(84f), %g0
	add	%o0, %g4, %o0
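	/* Same dispatch trick as at 80f above, but MOVE_LASTALIGNCHUNK
	 * expands to 4 insns (16 bytes) per 16 bytes copied, so the
	 * code offset back from 84f is simply %g4 itself.
	 */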

83: /* amemcpy_table */

	MOVE_LASTALIGNCHUNK(o1, o0, 0x60, g2, g3, g4, g5)
	MOVE_LASTALIGNCHUNK(o1, o0, 0x50, g2, g3, g4, g5)
	MOVE_LASTALIGNCHUNK(o1, o0, 0x40, g2, g3, g4, g5)
	MOVE_LASTALIGNCHUNK(o1, o0, 0x30, g2, g3, g4, g5)
	MOVE_LASTALIGNCHUNK(o1, o0, 0x20, g2, g3, g4, g5)
	MOVE_LASTALIGNCHUNK(o1, o0, 0x10, g2, g3, g4, g5)
	MOVE_LASTALIGNCHUNK(o1, o0, 0x00, g2, g3, g4, g5)

84: /* amemcpy_table_end */
	be	85f
	andcc	%g1, 4, %g0

	ldd	[%o1], %g2
	add	%o0, 8, %o0
	std	%g2, [%o0 - 0x08]
	add	%o1, 8, %o1
85: /* amemcpy_last7 */
	be	1f
	andcc	%g1, 2, %g0

	ld	[%o1], %g2
	add	%o1, 4, %o1
	st	%g2, [%o0]
	add	%o0, 4, %o0
1:
	be	1f
	andcc	%g1, 1, %g0

	lduh	[%o1], %g2
	add	%o1, 2, %o1
	sth	%g2, [%o0]
	add	%o0, 2, %o0
1:
	be	1f
	nop

	ldub	[%o1], %g2
	stb	%g2, [%o0]
1:
	retl
	mov	%g7, %o0

86: /* non_aligned */
	cmp	%o2, 6
	bleu	88f
	nop

	save	%sp, -96, %sp
	andcc	%i0, 3, %g0
	be	61f
	andcc	%i0, 1, %g0
	be	60f
	andcc	%i0, 2, %g0

	ldub	[%i1], %g5
	add	%i1, 1, %i1
	stb	%g5, [%i0]
	sub	%i2, 1, %i2
	bne	61f
	add	%i0, 1, %i0
60:
	ldub	[%i1], %g3
	add	%i1, 2, %i1
	stb	%g3, [%i0]
	sub	%i2, 2, %i2
	ldub	[%i1 - 1], %g3
	add	%i0, 2, %i0
	stb	%g3, [%i0 - 1]
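/* The code below copies a word-misaligned source with aligned word
 * loads, merging adjacent words with shifts: %g4 is the left-shift
 * count (src & 3) * 8 and %l0 is 32 - %g4.  A C sketch of one step
 * (big-endian, not part of the build):
 *
 *	unsigned int *s = (unsigned int *)((unsigned long)src & ~3UL);
 *	unsigned int shl = ((unsigned long)src & 3) * 8, shr = 32 - shl;
 *	*(unsigned int *)dst = (s[0] << shl) | (s[1] >> shr);
 */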
61:
	and	%i1, 3, %g2
	and	%i2, 0xc, %g3
	and	%i1, -4, %i1
	cmp	%g3, 4
	sll	%g2, 3, %g4
	mov	32, %g2
	be	4f
	sub	%g2, %g4, %l0

	blu	3f
	cmp	%g3, 0x8

	be	2f
	srl	%i2, 2, %g3

	ld	[%i1], %i3
	add	%i0, -8, %i0
	ld	[%i1 + 4], %i4
	b	8f
	add	%g3, 1, %g3
2:
	ld	[%i1], %i4
	add	%i0, -12, %i0
	ld	[%i1 + 4], %i5
	add	%g3, 2, %g3
	b	9f
	add	%i1, -4, %i1
3:
	ld	[%i1], %g1
	add	%i0, -4, %i0
	ld	[%i1 + 4], %i3
	srl	%i2, 2, %g3
	b	7f
	add	%i1, 4, %i1
4:
	ld	[%i1], %i5
	cmp	%i2, 7
	ld	[%i1 + 4], %g1
	srl	%i2, 2, %g3
	bleu	10f
	add	%i1, 8, %i1

	ld	[%i1], %i3
	add	%g3, -1, %g3
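	/* Software-pipelined shift-merge loop: %i5/%g1/%i3/%i4 hold four
	 * consecutive source words in flight, and each of the four
	 * stages (5:, 7:, 8:, 9:) merges one pair and reloads one
	 * register.  The branches to 7f/8f/9f above enter the pipeline
	 * part-way in, according to len & 0xc.
	 */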
5:
	sll	%i5, %g4, %g2
	srl	%g1, %l0, %g5
	or	%g2, %g5, %g2
	st	%g2, [%i0]
7:
	ld	[%i1 + 4], %i4
	sll	%g1, %g4, %g2
	srl	%i3, %l0, %g5
	or	%g2, %g5, %g2
	st	%g2, [%i0 + 4]
8:
	ld	[%i1 + 8], %i5
	sll	%i3, %g4, %g2
	srl	%i4, %l0, %g5
	or	%g2, %g5, %g2
	st	%g2, [%i0 + 8]
9:
	ld	[%i1 + 12], %g1
	sll	%i4, %g4, %g2
	srl	%i5, %l0, %g5
	addcc	%g3, -4, %g3
	or	%g2, %g5, %g2
	add	%i1, 16, %i1
	st	%g2, [%i0 + 12]
	add	%i0, 16, %i0
	bne,a	5b
	ld	[%i1], %i3
10:
	sll	%i5, %g4, %g2
	srl	%g1, %l0, %g5
	srl	%l0, 3, %g3
	or	%g2, %g5, %g2
	sub	%i1, %g3, %i1
	andcc	%i2, 2, %g0
	st	%g2, [%i0]
	be	1f
	andcc	%i2, 1, %g0

	ldub	[%i1], %g2
	add	%i1, 2, %i1
	stb	%g2, [%i0 + 4]
	add	%i0, 2, %i0
	ldub	[%i1 - 1], %g2
	stb	%g2, [%i0 + 3]
1:
	be	1f
	nop
	ldub	[%i1], %g2
	stb	%g2, [%i0 + 4]
1:
	ret
	restore	%g7, %g0, %o0

88: /* short_end */

	and	%o2, 0xe, %o3
20:
	sethi	%hi(89f), %o5
	sll	%o3, 3, %o4
	add	%o0, %o3, %o0
	sub	%o5, %o4, %o5
	add	%o1, %o3, %o1
	jmpl	%o5 + %lo(89f), %g0
	andcc	%o2, 1, %g0
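	/* %o3 = len & 0xe.  Each MOVE_SHORTCHUNK below is 4 insns
	 * (16 bytes) and copies 2 bytes, so the entry point is
	 * 89f - (%o3/2)*16 = 89f - %o3*8; the sll above forms %o3*8.
	 */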

	MOVE_SHORTCHUNK(o1, o0, 0x0c, g2, g3)
	MOVE_SHORTCHUNK(o1, o0, 0x0a, g2, g3)
	MOVE_SHORTCHUNK(o1, o0, 0x08, g2, g3)
	MOVE_SHORTCHUNK(o1, o0, 0x06, g2, g3)
	MOVE_SHORTCHUNK(o1, o0, 0x04, g2, g3)
	MOVE_SHORTCHUNK(o1, o0, 0x02, g2, g3)
	MOVE_SHORTCHUNK(o1, o0, 0x00, g2, g3)

89: /* short_table_end */

	be	1f
	nop

	ldub	[%o1], %g2
	stb	%g2, [%o0]
1:
	retl
	mov	%g7, %o0

90: /* short_aligned_end */
	bne	88b
	andcc	%o2, 8, %g0

	be	1f
	andcc	%o2, 4, %g0

	ld	[%o1 + 0x00], %g2
	ld	[%o1 + 0x04], %g3
	add	%o1, 8, %o1
	st	%g2, [%o0 + 0x00]
	st	%g3, [%o0 + 0x04]
	add	%o0, 8, %o0
1:
	b	81b
	mov	%o2, %g1