Linux Audio

Check our new training course

Loading...
v3.1
 
  1/*
  2 *  linux/arch/arm/lib/csumpartial.S
  3 *
  4 *  Copyright (C) 1995-1998 Russell King
  5 *
  6 * This program is free software; you can redistribute it and/or modify
  7 * it under the terms of the GNU General Public License version 2 as
  8 * published by the Free Software Foundation.
  9 */
 10#include <linux/linkage.h>
 11#include <asm/assembler.h>
 12
  13		.text
  14
  15/*
  16 * Function: __u32 csum_partial(const char *src, int len, __u32 sum)
  17 * Params  : r0 = buffer, r1 = len, r2 = checksum
  18 * Returns : r0 = new checksum
  19 */
  20
  21buf	.req	r0
  22len	.req	r1
  23sum	.req	r2
  24td0	.req	r3
  25td1	.req	r4	@ save before use
  26td2	.req	r5	@ save before use
  27td3	.req	lr
  28
		/*
		 * Register roles: buf/len/sum are the live arguments; td0-td3
		 * are checksum temporaries.  td1/td2 alias callee-saved r4/r5
		 * and are only touched inside the unrolled loop below, which
		 * saves and restores them.  This version uses pre-UAL
		 * conditional mnemonics: condition code before the size/S
		 * suffix (ldrneb, adcnes).
		 */

		/* len == 0: nothing to sum; drop saved buf, pop lr into pc */
  29.Lzero:		mov	r0, sum
  30		add	sp, sp, #4
  31		ldr	pc, [sp], #4
  32
  33		/*
  34		 * Handle 0 to 7 bytes, with any alignment of source and
  35		 * destination pointers.  Note that when we get here, C = 0
  36		 */
  37.Lless8:		teq	len, #0			@ check for zero count
  38		beq	.Lzero
  39
  40		/* we must have at least one byte. */
  41		tst	buf, #1			@ odd address?
  42		movne	sum, sum, ror #8
  43		ldrneb	td0, [buf], #1
  44		subne	len, len, #1
  45		adcnes	sum, sum, td0, put_byte_1
  46
  47.Lless4:		tst	len, #6
  48		beq	.Lless8_byte
  49
  50		/* we are now half-word aligned */
  51
  52.Lless8_wordlp:
  53#if __LINUX_ARM_ARCH__ >= 4
  54		ldrh	td0, [buf], #2
  55		sub	len, len, #2
  56#else
		@ pre-ARMv4 has no ldrh: assemble the halfword from two bytes
  57		ldrb	td0, [buf], #1
  58		ldrb	td3, [buf], #1
  59		sub	len, len, #2
  60#ifndef __ARMEB__
  61		orr	td0, td0, td3, lsl #8
  62#else
  63		orr	td0, td3, td0, lsl #8
  64#endif
  65#endif
  66		adcs	sum, sum, td0
  67		tst	len, #6
  68		bne	.Lless8_wordlp
  69
  70.Lless8_byte:	tst	len, #1			@ odd number of bytes
  71		ldrneb	td0, [buf], #1		@ include last byte
  72		adcnes	sum, sum, td0, put_byte_0	@ update checksum
  73
		/*
		 * Fold in the final carry, then pop the buf value pushed at
		 * entry and undo the entry-time ror #8 if the buffer started
		 * at an odd address.
		 */
  74.Ldone:		adc	r0, sum, #0		@ collect up the last carry
  75		ldr	td0, [sp], #4
  76		tst	td0, #1			@ check buffer alignment
  77		movne	r0, r0, ror #8		@ rotate checksum by 8 bits
  78		ldr	pc, [sp], #4		@ return
  79
		/*
		 * Subroutine, reached via blne from csum_partial with C = 0:
		 * consume up to 3 leading bytes so buf becomes 32-bit
		 * aligned, folding them into sum through the carry chain.
		 * put_byte_0/put_byte_1 come from <asm/assembler.h> and
		 * position a byte within the 32-bit accumulator.
		 * NOTE(review): their exact shifts depend on endianness -
		 * confirm against assembler.h.
		 */
  80.Lnot_aligned:	tst	buf, #1			@ odd address
  81		ldrneb	td0, [buf], #1		@ make even
  82		subne	len, len, #1
  83		adcnes	sum, sum, td0, put_byte_1	@ update checksum
  84
  85		tst	buf, #2			@ 32-bit aligned?
  86#if __LINUX_ARM_ARCH__ >= 4
  87		ldrneh	td0, [buf], #2		@ make 32-bit aligned
  88		subne	len, len, #2
  89#else
  90		ldrneb	td0, [buf], #1
  91		ldrneb	ip, [buf], #1
  92		subne	len, len, #2
  93#ifndef __ARMEB__
  94		orrne	td0, td0, ip, lsl #8
  95#else
  96		orrne	td0, ip, td0, lsl #8
  97#endif
  98#endif
  99		adcnes	sum, sum, td0		@ update checksum
 100		mov	pc, lr
 101
		/*
		 * Main entry.  The running checksum is accumulated with the
		 * carry flag live between adcs instructions; the intervening
		 * tst/teq/sub instructions are chosen so as not to disturb C
		 * (see the "should not change C" note below).
		 */
 102ENTRY(csum_partial)
 103		stmfd	sp!, {buf, lr}
 104		cmp	len, #8			@ Ensure that we have at least
 105		blo	.Lless8			@ 8 bytes to copy.
 106
 107		tst	buf, #1
 108		movne	sum, sum, ror #8
 109
 110		adds	sum, sum, #0		@ C = 0
 111		tst	buf, #3			@ Test destination alignment
 112		blne	.Lnot_aligned		@ align destination, return here
 113
 1141:		bics	ip, len, #31
 115		beq	3f
 116
 117		stmfd	sp!, {r4 - r5}
		/* unrolled main loop: 32 bytes (2 x 4-word ldmia) per pass */
 1182:		ldmia	buf!, {td0, td1, td2, td3}
 119		adcs	sum, sum, td0
 120		adcs	sum, sum, td1
 121		adcs	sum, sum, td2
 122		adcs	sum, sum, td3
 123		ldmia	buf!, {td0, td1, td2, td3}
 124		adcs	sum, sum, td0
 125		adcs	sum, sum, td1
 126		adcs	sum, sum, td2
 127		adcs	sum, sum, td3
 128		sub	ip, ip, #32
 129		teq	ip, #0
 130		bne	2b
 131		ldmfd	sp!, {r4 - r5}
 132
		/* sum any remaining whole words (len & 0x1c), 4 bytes at a time */
 1333:		tst	len, #0x1c		@ should not change C
 134		beq	.Lless4
 135
 1364:		ldr	td0, [buf], #4
 137		sub	len, len, #4
 138		adcs	sum, sum, td0
 139		tst	len, #0x1c
 140		bne	4b
 141		b	.Lless4
 142ENDPROC(csum_partial)
v6.8
  1/* SPDX-License-Identifier: GPL-2.0-only */
  2/*
  3 *  linux/arch/arm/lib/csumpartial.S
  4 *
  5 *  Copyright (C) 1995-1998 Russell King
 
 
 
 
  6 */
  7#include <linux/linkage.h>
  8#include <asm/assembler.h>
  9
 10		.text
 11
 12/*
 13 * Function: __u32 csum_partial(const char *src, int len, __u32 sum)
 14 * Params  : r0 = buffer, r1 = len, r2 = checksum
 15 * Returns : r0 = new checksum
 16 */
 17
 18buf	.req	r0
 19len	.req	r1
 20sum	.req	r2
 21td0	.req	r3
 22td1	.req	r4	@ save before use
 23td2	.req	r5	@ save before use
 24td3	.req	lr
 25
		/*
		 * Register roles: buf/len/sum are the live arguments; td0-td3
		 * are checksum temporaries.  td1/td2 alias callee-saved r4/r5
		 * and are only touched inside the unrolled loop below, which
		 * saves and restores them.  This version uses UAL conditional
		 * mnemonics (ldrbne, adcsne) and the ret macro.
		 */

		/* len == 0: nothing to sum; drop saved buf, pop lr into pc */
 26.Lzero:		mov	r0, sum
 27		add	sp, sp, #4
 28		ldr	pc, [sp], #4
 29
 30		/*
 31		 * Handle 0 to 7 bytes, with any alignment of source and
 32		 * destination pointers.  Note that when we get here, C = 0
 33		 */
 34.Lless8:		teq	len, #0			@ check for zero count
 35		beq	.Lzero
 36
 37		/* we must have at least one byte. */
 38		tst	buf, #1			@ odd address?
 39		movne	sum, sum, ror #8
 40		ldrbne	td0, [buf], #1
 41		subne	len, len, #1
 42		adcsne	sum, sum, td0, put_byte_1
 43
 44.Lless4:		tst	len, #6
 45		beq	.Lless8_byte
 46
 47		/* we are now half-word aligned */
 48
 49.Lless8_wordlp:
 50#if __LINUX_ARM_ARCH__ >= 4
 51		ldrh	td0, [buf], #2
 52		sub	len, len, #2
 53#else
		@ pre-ARMv4 has no ldrh: assemble the halfword from two bytes
 54		ldrb	td0, [buf], #1
 55		ldrb	td3, [buf], #1
 56		sub	len, len, #2
 57#ifndef __ARMEB__
 58		orr	td0, td0, td3, lsl #8
 59#else
 60		orr	td0, td3, td0, lsl #8
 61#endif
 62#endif
 63		adcs	sum, sum, td0
 64		tst	len, #6
 65		bne	.Lless8_wordlp
 66
 67.Lless8_byte:	tst	len, #1			@ odd number of bytes
 68		ldrbne	td0, [buf], #1		@ include last byte
 69		adcsne	sum, sum, td0, put_byte_0	@ update checksum
 70
		/*
		 * Fold in the final carry, then pop the buf value pushed at
		 * entry and undo the entry-time ror #8 if the buffer started
		 * at an odd address.
		 */
 71.Ldone:		adc	r0, sum, #0		@ collect up the last carry
 72		ldr	td0, [sp], #4
 73		tst	td0, #1			@ check buffer alignment
 74		movne	r0, r0, ror #8		@ rotate checksum by 8 bits
 75		ldr	pc, [sp], #4		@ return
 76
		/*
		 * Subroutine, reached via blne from csum_partial with C = 0:
		 * consume up to 3 leading bytes so buf becomes 32-bit
		 * aligned, folding them into sum through the carry chain.
		 * put_byte_0/put_byte_1 come from <asm/assembler.h> and
		 * position a byte within the 32-bit accumulator.
		 * NOTE(review): their exact shifts depend on endianness -
		 * confirm against assembler.h.
		 */
 77.Lnot_aligned:	tst	buf, #1			@ odd address
 78		ldrbne	td0, [buf], #1		@ make even
 79		subne	len, len, #1
 80		adcsne	sum, sum, td0, put_byte_1	@ update checksum
 81
 82		tst	buf, #2			@ 32-bit aligned?
 83#if __LINUX_ARM_ARCH__ >= 4
 84		ldrhne	td0, [buf], #2		@ make 32-bit aligned
 85		subne	len, len, #2
 86#else
 87		ldrbne	td0, [buf], #1
 88		ldrbne	ip, [buf], #1
 89		subne	len, len, #2
 90#ifndef __ARMEB__
 91		orrne	td0, td0, ip, lsl #8
 92#else
 93		orrne	td0, ip, td0, lsl #8
 94#endif
 95#endif
 96		adcsne	sum, sum, td0		@ update checksum
 97		ret	lr			@ ret: assembler.h macro - NOTE(review): presumably bx/mov per CPU; confirm
 98
		/*
		 * Main entry.  The running checksum is accumulated with the
		 * carry flag live between adcs instructions; the intervening
		 * tst/teq/sub instructions are chosen so as not to disturb C
		 * (see the "should not change C" note below).
		 */
 99ENTRY(csum_partial)
100		stmfd	sp!, {buf, lr}
101		cmp	len, #8			@ Ensure that we have at least
102		blo	.Lless8			@ 8 bytes to copy.
103
104		tst	buf, #1
105		movne	sum, sum, ror #8
106
107		adds	sum, sum, #0		@ C = 0
108		tst	buf, #3			@ Test destination alignment
109		blne	.Lnot_aligned		@ align destination, return here
110
1111:		bics	ip, len, #31
112		beq	3f
113
114		stmfd	sp!, {r4 - r5}
		/* unrolled main loop: 32 bytes (2 x 4-word ldmia) per pass */
1152:		ldmia	buf!, {td0, td1, td2, td3}
116		adcs	sum, sum, td0
117		adcs	sum, sum, td1
118		adcs	sum, sum, td2
119		adcs	sum, sum, td3
120		ldmia	buf!, {td0, td1, td2, td3}
121		adcs	sum, sum, td0
122		adcs	sum, sum, td1
123		adcs	sum, sum, td2
124		adcs	sum, sum, td3
125		sub	ip, ip, #32
126		teq	ip, #0
127		bne	2b
128		ldmfd	sp!, {r4 - r5}
129
		/* sum any remaining whole words (len & 0x1c), 4 bytes at a time */
1303:		tst	len, #0x1c		@ should not change C
131		beq	.Lless4
132
1334:		ldr	td0, [buf], #4
134		sub	len, len, #4
135		adcs	sum, sum, td0
136		tst	len, #0x1c
137		bne	4b
138		b	.Lless4
139ENDPROC(csum_partial)