@ xref: /openbmc/linux/arch/arm/lib/csumpartial.S (revision 84d69848c97faab0c25aa2667b273404d2e2a64a)
/*
 *  linux/arch/arm/lib/csumpartial.S
 *
 *  Copyright (C) 1995-1998 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/linkage.h>
#include <asm/assembler.h>
#include <asm/export.h>

		.text

/*
 * Function: __u32 csum_partial(const char *src, int len, __u32 sum)
 * Params  : r0 = buffer, r1 = len, r2 = checksum
 * Returns : r0 = new checksum
 *
 * The checksum is accumulated 32 bits at a time with an adcs chain,
 * so between instructions the carry flag holds the pending end-around
 * carry: nothing inserted into these sequences may corrupt C.
 * If the buffer starts on an odd address, sum is rotated by 8 bits on
 * entry and rotated back before return (see .Ldone), so byte lanes
 * stay consistent with an even-aligned accumulation.
 */

buf	.req	r0			@ advances through the source buffer
len	.req	r1			@ bytes remaining
sum	.req	r2			@ running 32-bit accumulator
td0	.req	r3			@ scratch (caller-clobbered)
td1	.req	r4	@ save before use
td2	.req	r5	@ save before use
td3	.req	lr			@ scratch; lr is saved on entry

		/* len == 0: return sum unchanged.  Drop the saved buf
		 * word from the stack, then pop lr straight into pc. */
.Lzero:		mov	r0, sum
		add	sp, sp, #4
		ldr	pc, [sp], #4

		/*
		 * Handle 0 to 7 bytes, with any alignment of source and
		 * destination pointers.  Note that when we get here, C = 0
		 */
.Lless8:		teq	len, #0			@ check for zero count
		beq	.Lzero

		/* we must have at least one byte. */
		tst	buf, #1			@ odd address?
		movne	sum, sum, ror #8	@ rotate so byte lands in the right lane
		ldrneb	td0, [buf], #1
		subne	len, len, #1
		adcnes	sum, sum, td0, put_byte_1

.Lless4:		tst	len, #6
		beq	.Lless8_byte

		/* we are now half-word aligned */

.Lless8_wordlp:
#if __LINUX_ARM_ARCH__ >= 4
		ldrh	td0, [buf], #2
		sub	len, len, #2
#else
		/* no ldrh before ARMv4: assemble the halfword from bytes */
		ldrb	td0, [buf], #1
		ldrb	td3, [buf], #1
		sub	len, len, #2
#ifndef __ARMEB__
		orr	td0, td0, td3, lsl #8
#else
		orr	td0, td3, td0, lsl #8
#endif
#endif
		adcs	sum, sum, td0
		tst	len, #6			@ more halfwords left? (preserves C)
		bne	.Lless8_wordlp

.Lless8_byte:	tst	len, #1			@ odd number of bytes
		ldrneb	td0, [buf], #1		@ include last byte
		adcnes	sum, sum, td0, put_byte_0	@ update checksum

.Ldone:		adc	r0, sum, #0		@ collect up the last carry
		ldr	td0, [sp], #4		@ reload the original buf saved at entry
		tst	td0, #1			@ check buffer alignment
		movne	r0, r0, ror #8		@ rotate checksum by 8 bits
		ldr	pc, [sp], #4		@ return

		/* Called via blne from the entry path: consume up to 3
		 * leading bytes so buf becomes 32-bit aligned, folding
		 * them into sum.  C carries across into the main loop. */
.Lnot_aligned:	tst	buf, #1			@ odd address
		ldrneb	td0, [buf], #1		@ make even
		subne	len, len, #1
		adcnes	sum, sum, td0, put_byte_1	@ update checksum

		tst	buf, #2			@ 32-bit aligned?
#if __LINUX_ARM_ARCH__ >= 4
		ldrneh	td0, [buf], #2		@ make 32-bit aligned
		subne	len, len, #2
#else
		ldrneb	td0, [buf], #1
		ldrneb	ip, [buf], #1
		subne	len, len, #2
#ifndef __ARMEB__
		orrne	td0, td0, ip, lsl #8
#else
		orrne	td0, ip, td0, lsl #8
#endif
#endif
		adcnes	sum, sum, td0		@ update checksum
		ret	lr

ENTRY(csum_partial)
		stmfd	sp!, {buf, lr}		@ original buf is reloaded at .Ldone
		cmp	len, #8			@ Ensure that we have at least
		blo	.Lless8			@ 8 bytes to copy.

		tst	buf, #1
		movne	sum, sum, ror #8	@ odd start: pre-rotate; undone at .Ldone

		adds	sum, sum, #0		@ C = 0
		tst	buf, #3			@ Test destination alignment
		blne	.Lnot_aligned		@ align destination, return here

1:		bics	ip, len, #31		@ ip = len rounded down to 32; C preserved by bics? no — bics sets C from shifter; here #31 leaves C unchanged
		beq	3f

		stmfd	sp!, {r4 - r5}		@ td1/td2 are callee-saved
		/* main loop: 32 bytes (8 words) per iteration, all
		 * folded into sum through the carry chain */
2:		ldmia	buf!, {td0, td1, td2, td3}
		adcs	sum, sum, td0
		adcs	sum, sum, td1
		adcs	sum, sum, td2
		adcs	sum, sum, td3
		ldmia	buf!, {td0, td1, td2, td3}
		adcs	sum, sum, td0
		adcs	sum, sum, td1
		adcs	sum, sum, td2
		adcs	sum, sum, td3
		sub	ip, ip, #32
		teq	ip, #0			@ teq (not cmp) so C survives for adcs
		bne	2b
		ldmfd	sp!, {r4 - r5}

3:		tst	len, #0x1c		@ should not change C
		beq	.Lless4

		/* 4 to 28 bytes left: one word at a time */
4:		ldr	td0, [buf], #4
		sub	len, len, #4
		adcs	sum, sum, td0
		tst	len, #0x1c
		bne	4b
		b	.Lless4
ENDPROC(csum_partial)
EXPORT_SYMBOL(csum_partial)