Linux Audio

Check our new training course

Loading...
v6.13.7
  1/* SPDX-License-Identifier: GPL-2.0-only */
  2/*
  3 *  linux/arch/arm/lib/csumpartialcopygeneric.S
  4 *
  5 *  Copyright (C) 1995-2001 Russell King
 
 
 
 
  6 */
  7#include <asm/assembler.h>
  8
  9/*
 10 * unsigned int
 11 * csum_partial_copy_xxx(const char *src, char *dst, int len)
 12 *  r0 = src, r1 = dst, r2 = len, r3 = sum
 13 *  Returns : r0 = checksum
 14 *
 15 * Note that 'tst' and 'teq' preserve the carry flag.
 16 */
 17
 18src	.req	r0		@ r0: source pointer (argument; advanced as bytes are consumed)
 19dst	.req	r1		@ r1: destination pointer (argument; advanced as bytes are stored)
 20len	.req	r2		@ r2: bytes remaining to copy (argument)
 21sum	.req	r3		@ r3: running checksum accumulator (folded via adcs carry chains)
 22
 23.Lzero:		mov	r0, sum			@ len == 0: return the accumulator as-is
 24		load_regs			@ restore regs & return (macro from the including wrapper)
 25
 26		/*
 27		 * Align an unaligned destination pointer.  We know that
 28		 * we have >= 8 bytes here, so we don't need to check
 29		 * the length.  Note that the source pointer hasn't been
 30		 * aligned yet.
 31		 */
 32.Ldst_unaligned:
 33		tst	dst, #1			@ dst on an odd byte boundary?
 34		beq	.Ldst_16bit
 35
 36		load1b	ip			@ load1b/load2b/load4l: src-fetch macros from the includer
 37		sub	len, len, #1
 38		adcs	sum, sum, ip, put_byte_1	@ update checksum
 39		strb	ip, [dst], #1
 40		tst	dst, #2			@ tst preserves C (see header note)
 41		reteq	lr			@ dst is now 32bit aligned
 42
 43.Ldst_16bit:	load2b	r8, ip			@ copy 2 bytes to reach 32-bit alignment
 44		sub	len, len, #2
 45		adcs	sum, sum, r8, put_byte_0
 46		strb	r8, [dst], #1
 47		adcs	sum, sum, ip, put_byte_1
 48		strb	ip, [dst], #1
 49		ret	lr			@ dst is now 32bit aligned
 50
 51		/*
 52		 * Handle 0 to 7 bytes, with any alignment of source and
 53		 * destination pointers.  Note that when we get here, C = 0
 54		 */
 55.Lless8:	teq	len, #0			@ check for zero count
 56		beq	.Lzero
 57
 58		/* we must have at least one byte. */
 59		tst	dst, #1			@ dst 16-bit aligned
 60		beq	.Lless8_aligned
 61
 62		/* Align dst */
 63		load1b	ip
 64		sub	len, len, #1
 65		adcs	sum, sum, ip, put_byte_1	@ update checksum
 66		strb	ip, [dst], #1
 67		tst	len, #6			@ any halfword-sized middle left?
 68		beq	.Lless8_byteonly
 69
 701:		load2b	r8, ip			@ two-bytes-at-a-time loop for the middle
 71		sub	len, len, #2
 72		adcs	sum, sum, r8, put_byte_0
 73		strb	r8, [dst], #1
 74		adcs	sum, sum, ip, put_byte_1
 75		strb	ip, [dst], #1
 76.Lless8_aligned:
 77		tst	len, #6
 78		bne	1b
 79.Lless8_byteonly:
 80		tst	len, #1			@ one trailing byte?
 81		beq	.Ldone
 82		load1b	r8
 83		adcs	sum, sum, r8, put_byte_0	@ update checksum
 84		strb	r8, [dst], #1
 85		b	.Ldone
 86
 87FN_ENTRY					@ entry name/prologue macro supplied by the includer
 88		save_regs
 89		mov	sum, #-1		@ seed checksum with 0xffffffff (no sum argument here)
 90
 91		cmp	len, #8			@ Ensure that we have at least
 92		blo	.Lless8			@ 8 bytes to copy.
 93
 94		adds	sum, sum, #0		@ C = 0
 95		tst	dst, #3			@ Test destination alignment
 96		blne	.Ldst_unaligned		@ align destination, return here
 97
 98		/*
 99		 * Ok, the dst pointer is now 32bit aligned, and we know
100		 * that we must have more than 4 bytes to copy.  Note
101		 * that C contains the carry from the dst alignment above.
102		 */
103
104		tst	src, #3			@ Test source alignment
105		bne	.Lsrc_not_aligned
106
107		/* Routine for src & dst aligned */
108
		@ ip = len & ~15: bytes handled by the 16-byte main loop
109		bics	ip, len, #15
110		beq	2f
111
1121:		load4l	r4, r5, r6, r7
113		stmia	dst!, {r4, r5, r6, r7}
114		adcs	sum, sum, r4		@ carry chains through the adcs sequence
115		adcs	sum, sum, r5
116		adcs	sum, sum, r6
117		adcs	sum, sum, r7
118		sub	ip, ip, #16
119		teq	ip, #0			@ teq (not cmp) so C survives for the next adcs
120		bne	1b
121
1222:		ands	ip, len, #12		@ 0/4/8/12 bytes of whole words remain
123		beq	4f
124		tst	ip, #8
125		beq	3f
126		load2l	r4, r5
127		stmia	dst!, {r4, r5}
128		adcs	sum, sum, r4
129		adcs	sum, sum, r5
130		tst	ip, #4
131		beq	4f
132
1333:		load1l	r4			@ one trailing aligned word
134		str	r4, [dst], #4
135		adcs	sum, sum, r4
136
1374:		ands	len, len, #3		@ 0..3 tail bytes remain
138		beq	.Ldone
		@ NOTE(review): a whole word is fetched for a 1-3 byte tail;
		@ presumably safe per the callers' buffer guarantees — confirm.
139		load1l	r4
140		tst	len, #2
141		mov	r5, r4, get_byte_0
142		beq	.Lexit
143		adcs	sum, sum, r4, lspush #16	@ checksum only the two bytes being stored
144		strb	r5, [dst], #1
145		mov	r5, r4, get_byte_1
146		strb	r5, [dst], #1
147		mov	r5, r4, get_byte_2
148.Lexit:		tst	len, #1			@ store/sum the final odd byte, if any
149		strbne	r5, [dst], #1
150		andne	r5, r5, #255
151		adcsne	sum, sum, r5, put_byte_0
152
153		/*
154		 * If the dst pointer was not 16-bit aligned, we
155		 * need to rotate the checksum here to get around
156		 * the inefficient byte manipulations in the
157		 * architecture independent code.
158		 */
159.Ldone:		adc	r0, sum, #0		@ fold the last pending carry into the result
160		ldr	sum, [sp, #0]		@ dst
161		tst	sum, #1			@ original dst odd -> byte-rotate the result
162		movne	r0, r0, ror #8
163		load_regs
164
165.Lsrc_not_aligned:
		/*
		 * src is not word aligned.  Word-align src, prime r4 with the
		 * first partial word, and dispatch on the misalignment offset.
		 * The fall-through path below handles src % 4 == 1: three live
		 * bytes are carried across iterations in r4.
		 */
166		adc	sum, sum, #0		@ include C from dst alignment
167		and	ip, src, #3		@ ip = src misalignment (1, 2 or 3)
168		bic	src, src, #3		@ word-align src for the load* macros
169		load1l	r5			@ prime with the first (partial) source word
170		cmp	ip, #2
171		beq	.Lsrc2_aligned
172		bhi	.Lsrc3_aligned
173		mov	r4, r5, lspull #8		@ C = 0
174		bics	ip, len, #15
175		beq	2f
		@ 16-byte loop: lspull/lspush (endian-agnostic shift macros,
		@ presumably from asm/assembler.h) splice adjacent words so dst
		@ receives byte-contiguous data.
1761:		load4l	r5, r6, r7, r8
177		orr	r4, r4, r5, lspush #24
178		mov	r5, r5, lspull #8
179		orr	r5, r5, r6, lspush #24
180		mov	r6, r6, lspull #8
181		orr	r6, r6, r7, lspush #24
182		mov	r7, r7, lspull #8
183		orr	r7, r7, r8, lspush #24
184		stmia	dst!, {r4, r5, r6, r7}
185		adcs	sum, sum, r4
186		adcs	sum, sum, r5
187		adcs	sum, sum, r6
188		adcs	sum, sum, r7
189		mov	r4, r8, lspull #8	@ carry leftover bytes into the next iteration
190		sub	ip, ip, #16
191		teq	ip, #0			@ teq preserves C for the adcs chain
192		bne	1b
1932:		ands	ip, len, #12		@ 0/4/8/12 bytes of whole words remain
194		beq	4f
195		tst	ip, #8
196		beq	3f
197		load2l	r5, r6
198		orr	r4, r4, r5, lspush #24
199		mov	r5, r5, lspull #8
200		orr	r5, r5, r6, lspush #24
201		stmia	dst!, {r4, r5}
202		adcs	sum, sum, r4
203		adcs	sum, sum, r5
204		mov	r4, r6, lspull #8
205		tst	ip, #4
206		beq	4f
2073:		load1l	r5			@ one more spliced word
208		orr	r4, r4, r5, lspush #24
209		str	r4, [dst], #4
210		adcs	sum, sum, r4
211		mov	r4, r5, lspull #8
2124:		ands	len, len, #3		@ 0..3 tail bytes (already held in r4)
213		beq	.Ldone
214		mov	r5, r4, get_byte_0
215		tst	len, #2
216		beq	.Lexit
217		adcs	sum, sum, r4, lspush #16	@ checksum only the two stored bytes
218		strb	r5, [dst], #1
219		mov	r5, r4, get_byte_1
220		strb	r5, [dst], #1
221		mov	r5, r4, get_byte_2
222		b	.Lexit
223
224.Lsrc2_aligned:	mov	r4, r5, lspull #16	@ src % 4 == 2: two live bytes carried in r4
225		adds	sum, sum, #0		@ C = 0
226		bics	ip, len, #15
227		beq	2f
2281:		load4l	r5, r6, r7, r8		@ 16-byte loop, splicing halfword-offset words
229		orr	r4, r4, r5, lspush #16
230		mov	r5, r5, lspull #16
231		orr	r5, r5, r6, lspush #16
232		mov	r6, r6, lspull #16
233		orr	r6, r6, r7, lspush #16
234		mov	r7, r7, lspull #16
235		orr	r7, r7, r8, lspush #16
236		stmia	dst!, {r4, r5, r6, r7}
237		adcs	sum, sum, r4
238		adcs	sum, sum, r5
239		adcs	sum, sum, r6
240		adcs	sum, sum, r7
241		mov	r4, r8, lspull #16	@ carry leftover halfword into next iteration
242		sub	ip, ip, #16
243		teq	ip, #0			@ teq preserves C for the adcs chain
244		bne	1b
2452:		ands	ip, len, #12		@ remaining whole words
246		beq	4f
247		tst	ip, #8
248		beq	3f
249		load2l	r5, r6
250		orr	r4, r4, r5, lspush #16
251		mov	r5, r5, lspull #16
252		orr	r5, r5, r6, lspush #16
253		stmia	dst!, {r4, r5}
254		adcs	sum, sum, r4
255		adcs	sum, sum, r5
256		mov	r4, r6, lspull #16
257		tst	ip, #4
258		beq	4f
2593:		load1l	r5
260		orr	r4, r4, r5, lspush #16
261		str	r4, [dst], #4
262		adcs	sum, sum, r4
263		mov	r4, r5, lspull #16
2644:		ands	len, len, #3		@ 0..3 tail bytes; r4 holds the 2 live bytes
265		beq	.Ldone
266		mov	r5, r4, get_byte_0
267		tst	len, #2
268		beq	.Lexit
269		adcs	sum, sum, r4		@ r4's upper half is zero after lspull #16
270		strb	r5, [dst], #1
271		mov	r5, r4, get_byte_1
272		strb	r5, [dst], #1
273		tst	len, #1			@ a third tail byte needs one more src fetch
274		beq	.Ldone
275		load1b	r5
276		b	.Lexit
277
278.Lsrc3_aligned:	mov	r4, r5, lspull #24	@ src % 4 == 3: one live byte carried in r4
279		adds	sum, sum, #0		@ C = 0
280		bics	ip, len, #15
281		beq	2f
2821:		load4l	r5, r6, r7, r8		@ 16-byte loop, splicing byte-3-offset words
283		orr	r4, r4, r5, lspush #8
284		mov	r5, r5, lspull #24
285		orr	r5, r5, r6, lspush #8
286		mov	r6, r6, lspull #24
287		orr	r6, r6, r7, lspush #8
288		mov	r7, r7, lspull #24
289		orr	r7, r7, r8, lspush #8
290		stmia	dst!, {r4, r5, r6, r7}
291		adcs	sum, sum, r4
292		adcs	sum, sum, r5
293		adcs	sum, sum, r6
294		adcs	sum, sum, r7
295		mov	r4, r8, lspull #24	@ carry leftover byte into next iteration
296		sub	ip, ip, #16
297		teq	ip, #0			@ teq preserves C for the adcs chain
298		bne	1b
2992:		ands	ip, len, #12		@ remaining whole words
300		beq	4f
301		tst	ip, #8
302		beq	3f
303		load2l	r5, r6
304		orr	r4, r4, r5, lspush #8
305		mov	r5, r5, lspull #24
306		orr	r5, r5, r6, lspush #8
307		stmia	dst!, {r4, r5}
308		adcs	sum, sum, r4
309		adcs	sum, sum, r5
310		mov	r4, r6, lspull #24
311		tst	ip, #4
312		beq	4f
3133:		load1l	r5
314		orr	r4, r4, r5, lspush #8
315		str	r4, [dst], #4
316		adcs	sum, sum, r4
317		mov	r4, r5, lspull #24
3184:		ands	len, len, #3		@ 0..3 tail bytes; only 1 live byte in r4
319		beq	.Ldone
320		mov	r5, r4, get_byte_0
321		tst	len, #2
322		beq	.Lexit
323		strb	r5, [dst], #1		@ store the carried byte first
324		adcs	sum, sum, r4
325		load1l	r4			@ fetch one more word for the remaining bytes
326		mov	r5, r4, get_byte_0
327		strb	r5, [dst], #1
328		adcs	sum, sum, r4, lspush #24
329		mov	r5, r4, get_byte_1
330		b	.Lexit
331FN_EXIT					@ closing macro from the including wrapper
v4.17
 
  1/*
  2 *  linux/arch/arm/lib/csumpartialcopygeneric.S
  3 *
  4 *  Copyright (C) 1995-2001 Russell King
  5 *
  6 * This program is free software; you can redistribute it and/or modify
  7 * it under the terms of the GNU General Public License version 2 as
  8 * published by the Free Software Foundation.
  9 */
 10#include <asm/assembler.h>
 11
 12/*
 13 * unsigned int
 14 * csum_partial_copy_xxx(const char *src, char *dst, int len, int sum)
 15 *  r0 = src, r1 = dst, r2 = len, r3 = sum
 16 *  Returns : r0 = checksum
 17 *
 18 * Note that 'tst' and 'teq' preserve the carry flag.
 19 */
 20
	/* Older (v4.17) duplicate of the same routine, as captured by the page scrape. */
 21src	.req	r0		@ r0: source pointer
 22dst	.req	r1		@ r1: destination pointer
 23len	.req	r2		@ r2: bytes remaining
 24sum	.req	r3		@ r3: running checksum (an input argument in this version)
 25
 26.Lzero:		mov	r0, sum			@ len == 0: return the sum argument unchanged
 27		load_regs			@ epilogue macro (supplied by the including wrapper)
 28
 29		/*
 30		 * Align an unaligned destination pointer.  We know that
 31		 * we have >= 8 bytes here, so we don't need to check
 32		 * the length.  Note that the source pointer hasn't been
 33		 * aligned yet.
 34		 */
 35.Ldst_unaligned:
 36		tst	dst, #1			@ dst on an odd byte boundary?
 37		beq	.Ldst_16bit
 38
 39		load1b	ip
 40		sub	len, len, #1
 41		adcs	sum, sum, ip, put_byte_1	@ update checksum
 42		strb	ip, [dst], #1
 43		tst	dst, #2			@ tst preserves C (see header note)
 44		reteq	lr			@ dst is now 32bit aligned
 45
 46.Ldst_16bit:	load2b	r8, ip			@ copy 2 bytes to reach 32-bit alignment
 47		sub	len, len, #2
 48		adcs	sum, sum, r8, put_byte_0
 49		strb	r8, [dst], #1
 50		adcs	sum, sum, ip, put_byte_1
 51		strb	ip, [dst], #1
 52		ret	lr			@ dst is now 32bit aligned
 53
 54		/*
 55		 * Handle 0 to 7 bytes, with any alignment of source and
 56		 * destination pointers.  Note that when we get here, C = 0
 57		 */
 58.Lless8:	teq	len, #0			@ check for zero count
 59		beq	.Lzero
 60
 61		/* we must have at least one byte. */
 62		tst	dst, #1			@ dst 16-bit aligned
 63		beq	.Lless8_aligned
 64
 65		/* Align dst */
 66		load1b	ip
 67		sub	len, len, #1
 68		adcs	sum, sum, ip, put_byte_1	@ update checksum
 69		strb	ip, [dst], #1
 70		tst	len, #6			@ any halfword-sized middle left?
 71		beq	.Lless8_byteonly
 72
 731:		load2b	r8, ip			@ two-bytes-at-a-time loop for the middle
 74		sub	len, len, #2
 75		adcs	sum, sum, r8, put_byte_0
 76		strb	r8, [dst], #1
 77		adcs	sum, sum, ip, put_byte_1
 78		strb	ip, [dst], #1
 79.Lless8_aligned:
 80		tst	len, #6
 81		bne	1b
 82.Lless8_byteonly:
 83		tst	len, #1			@ one trailing byte?
 84		beq	.Ldone
 85		load1b	r8
 86		adcs	sum, sum, r8, put_byte_0	@ update checksum
 87		strb	r8, [dst], #1
 88		b	.Ldone
 89
 90FN_ENTRY					@ older version: sum arrives in r3, no #-1 seeding
 91		save_regs

 92
 93		cmp	len, #8			@ Ensure that we have at least
 94		blo	.Lless8			@ 8 bytes to copy.
 95
 96		adds	sum, sum, #0		@ C = 0
 97		tst	dst, #3			@ Test destination alignment
 98		blne	.Ldst_unaligned		@ align destination, return here
 99
100		/*
101		 * Ok, the dst pointer is now 32bit aligned, and we know
102		 * that we must have more than 4 bytes to copy.  Note
103		 * that C contains the carry from the dst alignment above.
104		 */
105
106		tst	src, #3			@ Test source alignment
107		bne	.Lsrc_not_aligned
108
109		/* Routine for src & dst aligned */
110
111		bics	ip, len, #15		@ ip = bytes for the 16-byte main loop
112		beq	2f
113
1141:		load4l	r4, r5, r6, r7
115		stmia	dst!, {r4, r5, r6, r7}
116		adcs	sum, sum, r4		@ carry chains through the adcs sequence
117		adcs	sum, sum, r5
118		adcs	sum, sum, r6
119		adcs	sum, sum, r7
120		sub	ip, ip, #16
121		teq	ip, #0			@ teq preserves C for the adcs chain
122		bne	1b
123
1242:		ands	ip, len, #12		@ remaining whole words
125		beq	4f
126		tst	ip, #8
127		beq	3f
128		load2l	r4, r5
129		stmia	dst!, {r4, r5}
130		adcs	sum, sum, r4
131		adcs	sum, sum, r5
132		tst	ip, #4
133		beq	4f
134
1353:		load1l	r4			@ one trailing aligned word
136		str	r4, [dst], #4
137		adcs	sum, sum, r4
138
1394:		ands	len, len, #3		@ 0..3 tail bytes remain
140		beq	.Ldone
141		load1l	r4
142		tst	len, #2
143		mov	r5, r4, get_byte_0
144		beq	.Lexit
145		adcs	sum, sum, r4, lspush #16	@ checksum only the two stored bytes
146		strb	r5, [dst], #1
147		mov	r5, r4, get_byte_1
148		strb	r5, [dst], #1
149		mov	r5, r4, get_byte_2
150.Lexit:		tst	len, #1			@ store/sum the final odd byte, if any
		@ strneb/adcnes: pre-UAL suffix spelling of strbne/adcsne
151		strneb	r5, [dst], #1
152		andne	r5, r5, #255
153		adcnes	sum, sum, r5, put_byte_0
154
155		/*
156		 * If the dst pointer was not 16-bit aligned, we
157		 * need to rotate the checksum here to get around
158		 * the inefficient byte manipulations in the
159		 * architecture independent code.
160		 */
161.Ldone:		adc	r0, sum, #0		@ fold the last pending carry into the result
162		ldr	sum, [sp, #0]		@ dst
163		tst	sum, #1			@ original dst odd -> byte-rotate the result
164		movne	r0, r0, ror #8
165		load_regs
166
167.Lsrc_not_aligned:
		/*
		 * src not word aligned: word-align src, prime r4 with the first
		 * partial word, dispatch on the offset.  Fall-through handles
		 * src % 4 == 1 (three live bytes carried in r4).
		 */
168		adc	sum, sum, #0		@ include C from dst alignment
169		and	ip, src, #3		@ ip = src misalignment (1, 2 or 3)
170		bic	src, src, #3		@ word-align src
171		load1l	r5
172		cmp	ip, #2
173		beq	.Lsrc2_aligned
174		bhi	.Lsrc3_aligned
175		mov	r4, r5, lspull #8		@ C = 0
176		bics	ip, len, #15
177		beq	2f
1781:		load4l	r5, r6, r7, r8		@ 16-byte loop, splicing byte-offset words
179		orr	r4, r4, r5, lspush #24
180		mov	r5, r5, lspull #8
181		orr	r5, r5, r6, lspush #24
182		mov	r6, r6, lspull #8
183		orr	r6, r6, r7, lspush #24
184		mov	r7, r7, lspull #8
185		orr	r7, r7, r8, lspush #24
186		stmia	dst!, {r4, r5, r6, r7}
187		adcs	sum, sum, r4
188		adcs	sum, sum, r5
189		adcs	sum, sum, r6
190		adcs	sum, sum, r7
191		mov	r4, r8, lspull #8	@ carry leftover bytes into next iteration
192		sub	ip, ip, #16
193		teq	ip, #0			@ teq preserves C for the adcs chain
194		bne	1b
1952:		ands	ip, len, #12		@ remaining whole words
196		beq	4f
197		tst	ip, #8
198		beq	3f
199		load2l	r5, r6
200		orr	r4, r4, r5, lspush #24
201		mov	r5, r5, lspull #8
202		orr	r5, r5, r6, lspush #24
203		stmia	dst!, {r4, r5}
204		adcs	sum, sum, r4
205		adcs	sum, sum, r5
206		mov	r4, r6, lspull #8
207		tst	ip, #4
208		beq	4f
2093:		load1l	r5
210		orr	r4, r4, r5, lspush #24
211		str	r4, [dst], #4
212		adcs	sum, sum, r4
213		mov	r4, r5, lspull #8
2144:		ands	len, len, #3		@ 0..3 tail bytes (already held in r4)
215		beq	.Ldone
216		mov	r5, r4, get_byte_0
217		tst	len, #2
218		beq	.Lexit
219		adcs	sum, sum, r4, lspush #16	@ checksum only the two stored bytes
220		strb	r5, [dst], #1
221		mov	r5, r4, get_byte_1
222		strb	r5, [dst], #1
223		mov	r5, r4, get_byte_2
224		b	.Lexit
225
226.Lsrc2_aligned:	mov	r4, r5, lspull #16	@ src % 4 == 2: two live bytes carried in r4
227		adds	sum, sum, #0		@ C = 0
228		bics	ip, len, #15
229		beq	2f
2301:		load4l	r5, r6, r7, r8		@ 16-byte loop, splicing halfword-offset words
231		orr	r4, r4, r5, lspush #16
232		mov	r5, r5, lspull #16
233		orr	r5, r5, r6, lspush #16
234		mov	r6, r6, lspull #16
235		orr	r6, r6, r7, lspush #16
236		mov	r7, r7, lspull #16
237		orr	r7, r7, r8, lspush #16
238		stmia	dst!, {r4, r5, r6, r7}
239		adcs	sum, sum, r4
240		adcs	sum, sum, r5
241		adcs	sum, sum, r6
242		adcs	sum, sum, r7
243		mov	r4, r8, lspull #16	@ carry leftover halfword into next iteration
244		sub	ip, ip, #16
245		teq	ip, #0			@ teq preserves C for the adcs chain
246		bne	1b
2472:		ands	ip, len, #12		@ remaining whole words
248		beq	4f
249		tst	ip, #8
250		beq	3f
251		load2l	r5, r6
252		orr	r4, r4, r5, lspush #16
253		mov	r5, r5, lspull #16
254		orr	r5, r5, r6, lspush #16
255		stmia	dst!, {r4, r5}
256		adcs	sum, sum, r4
257		adcs	sum, sum, r5
258		mov	r4, r6, lspull #16
259		tst	ip, #4
260		beq	4f
2613:		load1l	r5
262		orr	r4, r4, r5, lspush #16
263		str	r4, [dst], #4
264		adcs	sum, sum, r4
265		mov	r4, r5, lspull #16
2664:		ands	len, len, #3		@ 0..3 tail bytes; r4 holds the 2 live bytes
267		beq	.Ldone
268		mov	r5, r4, get_byte_0
269		tst	len, #2
270		beq	.Lexit
271		adcs	sum, sum, r4
272		strb	r5, [dst], #1
273		mov	r5, r4, get_byte_1
274		strb	r5, [dst], #1
275		tst	len, #1			@ a third tail byte needs one more src fetch
276		beq	.Ldone
277		load1b	r5
278		b	.Lexit
279
280.Lsrc3_aligned:	mov	r4, r5, lspull #24	@ src % 4 == 3: one live byte carried in r4
281		adds	sum, sum, #0		@ C = 0
282		bics	ip, len, #15
283		beq	2f
2841:		load4l	r5, r6, r7, r8		@ 16-byte loop, splicing byte-3-offset words
285		orr	r4, r4, r5, lspush #8
286		mov	r5, r5, lspull #24
287		orr	r5, r5, r6, lspush #8
288		mov	r6, r6, lspull #24
289		orr	r6, r6, r7, lspush #8
290		mov	r7, r7, lspull #24
291		orr	r7, r7, r8, lspush #8
292		stmia	dst!, {r4, r5, r6, r7}
293		adcs	sum, sum, r4
294		adcs	sum, sum, r5
295		adcs	sum, sum, r6
296		adcs	sum, sum, r7
297		mov	r4, r8, lspull #24	@ carry leftover byte into next iteration
298		sub	ip, ip, #16
299		teq	ip, #0			@ teq preserves C for the adcs chain
300		bne	1b
3012:		ands	ip, len, #12		@ remaining whole words
302		beq	4f
303		tst	ip, #8
304		beq	3f
305		load2l	r5, r6
306		orr	r4, r4, r5, lspush #8
307		mov	r5, r5, lspull #24
308		orr	r5, r5, r6, lspush #8
309		stmia	dst!, {r4, r5}
310		adcs	sum, sum, r4
311		adcs	sum, sum, r5
312		mov	r4, r6, lspull #24
313		tst	ip, #4
314		beq	4f
3153:		load1l	r5
316		orr	r4, r4, r5, lspush #8
317		str	r4, [dst], #4
318		adcs	sum, sum, r4
319		mov	r4, r5, lspull #24
3204:		ands	len, len, #3		@ 0..3 tail bytes; only 1 live byte in r4
321		beq	.Ldone
322		mov	r5, r4, get_byte_0
323		tst	len, #2
324		beq	.Lexit
325		strb	r5, [dst], #1		@ store the carried byte first
326		adcs	sum, sum, r4
327		load1l	r4			@ fetch one more word for the remaining bytes
328		mov	r5, r4, get_byte_0
329		strb	r5, [dst], #1
330		adcs	sum, sum, r4, lspush #24
331		mov	r5, r4, get_byte_1
332		b	.Lexit
333FN_EXIT					@ closing macro from the including wrapper