/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Quick'n'dirty IP checksum ...
 *
 * Copyright (C) 1998, 1999 Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2007  Maciej W. Rozycki
 */
#include <linux/errno.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/regdef.h>

#ifdef CONFIG_64BIT
/*
 * As we are sharing the code base with the mips32 tree (which uses the
 * o32 ABI register definitions), we need to redefine the register
 * definitions from the n64 ABI register naming to the o32 ABI register
 * naming.
 */
#undef t0
#undef t1
#undef t2
#undef t3
#define t0	$8
#define t1	$9
#define t2	$10
#define t3	$11
#define t4	$12
#define t5	$13
#define t6	$14
#define t7	$15

#define USE_DOUBLE
#endif

#ifdef USE_DOUBLE

#define LOAD   ld
#define ADD    daddu
#define NBYTES 8

#else

#define LOAD   lw
#define ADD    addu
#define NBYTES 4

#endif /* USE_DOUBLE */

#define UNIT(unit)  ((unit)*NBYTES)

#define ADDC(sum,reg)						\
	.set	push;						\
	.set	noat;						\
	ADD	sum, reg;					\
	sltu	v1, sum, reg;					\
	ADD	sum, v1;					\
	.set	pop
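
/*
 * ADDC is the end-around-carry step of the ones'-complement sum: when
 * the addition overflows, the carry is folded back into the low bits.
 * Roughly, in C (an illustrative sketch assuming unsigned
 * register-width arithmetic, not the kernel's C implementation):
 *
 *	sum += reg;
 *	if (sum < reg)	// unsigned wrap-around means a carry came out
 *		sum++;	// fold the carry back in
 */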

#define CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3)	\
	LOAD	_t0, (offset + UNIT(0))(src);			\
	LOAD	_t1, (offset + UNIT(1))(src);			\
	LOAD	_t2, (offset + UNIT(2))(src);			\
	LOAD	_t3, (offset + UNIT(3))(src);			\
	ADDC(sum, _t0);						\
	ADDC(sum, _t1);						\
	ADDC(sum, _t2);						\
	ADDC(sum, _t3)

#ifdef USE_DOUBLE
#define CSUM_BIGCHUNK(src, offset, sum, _t0, _t1, _t2, _t3)	\
	CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3)
#else
#define CSUM_BIGCHUNK(src, offset, sum, _t0, _t1, _t2, _t3)	\
	CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3);	\
	CSUM_BIGCHUNK1(src, offset + 0x10, sum, _t0, _t1, _t2, _t3)
#endif
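
/*
 * Either way, one CSUM_BIGCHUNK invocation sums 0x20 bytes: four
 * doublewords when USE_DOUBLE is defined, or two CSUM_BIGCHUNK1
 * passes of four words each when it is not.
 */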

/*
 * a0: source address
 * a1: length of the area to checksum
 * a2: partial checksum
 */
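
/*
 * In C terms, this is the usual kernel entry point (a sketch of the
 * prototype as declared for MIPS in asm/checksum.h; shown for
 * orientation only):
 *
 *	__wsum csum_partial(const void *buff, int len, __wsum sum);
 */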

#define src a0
#define sum v0

	.text
	.set	noreorder
	.align	5
LEAF(csum_partial)
	move	sum, zero
	move	t7, zero

	sltiu	t8, a1, 0x8
	bnez	t8, .Lsmall_csumcpy		/* < 8 bytes to copy */
	 move	t2, a1

	andi	t7, src, 0x1			/* odd buffer? */

.Lhword_align:
	beqz	t7, .Lword_align
	 andi	t8, src, 0x2

	lbu	t0, (src)
	LONG_SUBU	a1, a1, 0x1
#ifdef __MIPSEL__
	sll	t0, t0, 8
#endif
	ADDC(sum, t0)
	PTR_ADDU	src, src, 0x1
	andi	t8, src, 0x2

.Lword_align:
	beqz	t8, .Ldword_align
	 sltiu	t8, a1, 56

	lhu	t0, (src)
	LONG_SUBU	a1, a1, 0x2
	ADDC(sum, t0)
	sltiu	t8, a1, 56
	PTR_ADDU	src, src, 0x2

.Ldword_align:
	bnez	t8, .Ldo_end_words
	 move	t8, a1

	andi	t8, src, 0x4
	beqz	t8, .Lqword_align
	 andi	t8, src, 0x8

	lw	t0, 0x00(src)
	LONG_SUBU	a1, a1, 0x4
	ADDC(sum, t0)
	PTR_ADDU	src, src, 0x4
	andi	t8, src, 0x8

.Lqword_align:
	beqz	t8, .Loword_align
	 andi	t8, src, 0x10

#ifdef USE_DOUBLE
	ld	t0, 0x00(src)
	LONG_SUBU	a1, a1, 0x8
	ADDC(sum, t0)
#else
	lw	t0, 0x00(src)
	lw	t1, 0x04(src)
	LONG_SUBU	a1, a1, 0x8
	ADDC(sum, t0)
	ADDC(sum, t1)
#endif
	PTR_ADDU	src, src, 0x8
	andi	t8, src, 0x10

.Loword_align:
	beqz	t8, .Lbegin_movement
	 LONG_SRL	t8, a1, 0x7

#ifdef USE_DOUBLE
	ld	t0, 0x00(src)
	ld	t1, 0x08(src)
	ADDC(sum, t0)
	ADDC(sum, t1)
#else
	CSUM_BIGCHUNK1(src, 0x00, sum, t0, t1, t3, t4)
#endif
	LONG_SUBU	a1, a1, 0x10
	PTR_ADDU	src, src, 0x10
	LONG_SRL	t8, a1, 0x7

.Lbegin_movement:
	beqz	t8, 1f
	 andi	t2, a1, 0x40

.Lmove_128bytes:
	CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
	CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4)
	CSUM_BIGCHUNK(src, 0x40, sum, t0, t1, t3, t4)
	CSUM_BIGCHUNK(src, 0x60, sum, t0, t1, t3, t4)
	LONG_SUBU	t8, t8, 0x01
	.set	reorder				/* DADDI_WAR */
	PTR_ADDU	src, src, 0x80
	bnez	t8, .Lmove_128bytes
	.set	noreorder

1:
	beqz	t2, 1f
	 andi	t2, a1, 0x20

.Lmove_64bytes:
	CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
	CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4)
	PTR_ADDU	src, src, 0x40

1:
	beqz	t2, .Ldo_end_words
	 andi	t8, a1, 0x1c

.Lmove_32bytes:
	CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
	andi	t8, a1, 0x1c
	PTR_ADDU	src, src, 0x20

.Ldo_end_words:
	beqz	t8, .Lsmall_csumcpy
	 andi	t2, a1, 0x3
	LONG_SRL	t8, t8, 0x2

.Lend_words:
	lw	t0, (src)
	LONG_SUBU	t8, t8, 0x1
	ADDC(sum, t0)
	.set	reorder				/* DADDI_WAR */
	PTR_ADDU	src, src, 0x4
	bnez	t8, .Lend_words
	.set	noreorder

/* unknown src alignment and < 8 bytes to go  */
.Lsmall_csumcpy:
	move	a1, t2

	andi	t0, a1, 4
	beqz	t0, 1f
	 andi	t0, a1, 2

	/* Still a full word to go  */
	ulw	t1, (src)
	PTR_ADDIU	src, 4
	ADDC(sum, t1)

1:	move	t1, zero
	beqz	t0, 1f
	 andi	t0, a1, 1

	/* Still a halfword to go  */
	ulhu	t1, (src)
	PTR_ADDIU	src, 2

1:	beqz	t0, 1f
	 sll	t1, t1, 16

	lbu	t2, (src)
	 nop

#ifdef __MIPSEB__
	sll	t2, t2, 8
#endif
	or	t1, t2

1:	ADDC(sum, t1)

	/* fold checksum */
	.set	push
	.set	noat
#ifdef USE_DOUBLE
	dsll32	v1, sum, 0
	daddu	sum, v1
	sltu	v1, sum, v1
	dsra32	sum, sum, 0
	addu	sum, v1
#endif
	sll	v1, sum, 16
	addu	sum, v1
	sltu	v1, sum, v1
	srl	sum, sum, 16
	addu	sum, v1
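
	/*
	 * The fold above narrows the running sum to 16 bits while keeping
	 * the ones'-complement property.  An illustrative C sketch (not
	 * taken from the kernel sources):
	 *
	 *	sum = (sum & 0xffff) + (sum >> 16);	// fold high into low
	 *	sum += sum >> 16;			// absorb any new carry
	 *
	 * If the buffer started on an odd address, the bytes were
	 * accumulated in swapped lanes; the byte swap below undoes that.
	 */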

	/* odd buffer alignment? */
	beqz	t7, 1f
	 nop
	sll	v1, sum, 8
	srl	sum, sum, 8
	or	sum, v1
	andi	sum, 0xffff
	.set	pop
1:
	.set	reorder
	/* Add the passed partial csum.  */
	ADDC(sum, a2)
	jr	ra
	.set	noreorder
	END(csum_partial)


/*
 * checksum and copy routines based on memcpy.S
 *
 *	csum_partial_copy_nocheck(src, dst, len, sum)
 *	__csum_partial_copy_user(src, dst, len, sum, errp)
 *
 * See "Spec" in memcpy.S for details.  Unlike __copy_user, all
 * functions in this file use the standard calling convention.
 */

#define src a0
#define dst a1
#define len a2
#define psum a3
#define sum v0
#define odd t8
#define errptr t9

/*
 * The exception handler for loads requires that:
 *  1- AT contain the address of the byte just past the end of the source
 *     of the copy,
 *  2- src_entry <= src < AT, and
 *  3- (dst - src) == (dst_entry - src_entry),
 * The _entry suffix denotes values when __copy_user was called.
 *
 * (1) is set up by __csum_partial_copy_from_user and maintained by
 *	not writing AT in __csum_partial_copy
 * (2) is met by incrementing src by the number of bytes copied
 * (3) is met by not doing loads between a pair of increments of dst and src
 *
 * The exception handlers for stores store -EFAULT to errptr and return.
 * These handlers do not need to overwrite any data.
 */

#define EXC(inst_reg,addr,handler)		\
9:	inst_reg, addr;				\
	.section __ex_table,"a";		\
	PTR	9b, handler;			\
	.previous
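
/*
 * Each EXC() use tags one load or store with a fixup handler: the
 * local label 9: marks the instruction, and the (address, handler)
 * pair is emitted into the __ex_table section that the kernel's fault
 * handler searches.  For example,
 *
 *	EXC(	LOAD	t0, UNIT(0)(src),	.Ll_exc)
 *
 * expands to
 *
 *	9:	LOAD	t0, UNIT(0)(src);
 *		.section __ex_table,"a";
 *		PTR	9b, .Ll_exc;
 *		.previous
 */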

#ifdef USE_DOUBLE

#define LOAD   ld
#define LOADL  ldl
#define LOADR  ldr
#define STOREL sdl
#define STORER sdr
#define STORE  sd
#define ADD    daddu
#define SUB    dsubu
#define SRL    dsrl
#define SLL    dsll
#define SLLV   dsllv
#define SRLV   dsrlv
#define NBYTES 8
#define LOG_NBYTES 3

#else

#define LOAD   lw
#define LOADL  lwl
#define LOADR  lwr
#define STOREL swl
#define STORER swr
#define STORE  sw
#define ADD    addu
#define SUB    subu
#define SRL    srl
#define SLL    sll
#define SLLV   sllv
#define SRLV   srlv
#define NBYTES 4
#define LOG_NBYTES 2

#endif /* USE_DOUBLE */

#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define LDFIRST LOADR
#define LDREST  LOADL
#define STFIRST STORER
#define STREST  STOREL
#define SHIFT_DISCARD SLLV
#define SHIFT_DISCARD_REVERT SRLV
#else
#define LDFIRST LOADL
#define LDREST  LOADR
#define STFIRST STOREL
#define STREST  STORER
#define SHIFT_DISCARD SRLV
#define SHIFT_DISCARD_REVERT SLLV
#endif

#define FIRST(unit) ((unit)*NBYTES)
#define REST(unit)  (FIRST(unit)+NBYTES-1)
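
/*
 * LDFIRST/LDREST pair the MIPS unaligned-access instructions so an
 * unaligned word is assembled in two loads.  For example, on a
 * big-endian CPU with NBYTES == 4, an unaligned word at src is
 * fetched with
 *
 *	LDFIRST	t0, FIRST(0)(src)	# lwl t0, 0(src)
 *	LDREST	t0, REST(0)(src)	# lwr t0, 3(src)
 *
 * where lwl fills the most significant bytes and lwr the least
 * significant ones; the little-endian mapping simply swaps the roles.
 */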

#define ADDRMASK (NBYTES-1)

#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	.set	noat
#else
	.set	at=v1
#endif

LEAF(__csum_partial_copy_user)
	PTR_ADDU	AT, src, len	/* See (1) above. */
#ifdef CONFIG_64BIT
	move	errptr, a4
#else
	lw	errptr, 16(sp)
#endif
FEXPORT(csum_partial_copy_nocheck)
	move	sum, zero
	move	odd, zero
	/*
	 * Note: dst & src may be unaligned, len may be 0
	 * Temps
	 */
	/*
	 * The "issue break"s below are very approximate.
	 * Issue delays for dcache fills will perturb the schedule, as will
	 * load queue full replay traps, etc.
	 *
	 * If len < NBYTES use byte operations.
	 */
	sltu	t2, len, NBYTES
	and	t1, dst, ADDRMASK
	bnez	t2, .Lcopy_bytes_checklen
	 and	t0, src, ADDRMASK
	andi	odd, dst, 0x1			/* odd buffer? */
	bnez	t1, .Ldst_unaligned
	 nop
	bnez	t0, .Lsrc_unaligned_dst_aligned
	/*
	 * use delay slot for fall-through
	 * src and dst are aligned; need to compute rem
	 */
.Lboth_aligned:
	 SRL	t0, len, LOG_NBYTES+3    # +3 for 8 units/iter
	beqz	t0, .Lcleanup_both_aligned # len < 8*NBYTES
	 nop
	SUB	len, 8*NBYTES		# subtract here for bgez loop
	.align	4
1:
EXC(	LOAD	t0, UNIT(0)(src),	.Ll_exc)
EXC(	LOAD	t1, UNIT(1)(src),	.Ll_exc_copy)
EXC(	LOAD	t2, UNIT(2)(src),	.Ll_exc_copy)
EXC(	LOAD	t3, UNIT(3)(src),	.Ll_exc_copy)
EXC(	LOAD	t4, UNIT(4)(src),	.Ll_exc_copy)
EXC(	LOAD	t5, UNIT(5)(src),	.Ll_exc_copy)
EXC(	LOAD	t6, UNIT(6)(src),	.Ll_exc_copy)
EXC(	LOAD	t7, UNIT(7)(src),	.Ll_exc_copy)
	SUB	len, len, 8*NBYTES
	ADD	src, src, 8*NBYTES
EXC(	STORE	t0, UNIT(0)(dst),	.Ls_exc)
	ADDC(sum, t0)
EXC(	STORE	t1, UNIT(1)(dst),	.Ls_exc)
	ADDC(sum, t1)
EXC(	STORE	t2, UNIT(2)(dst),	.Ls_exc)
	ADDC(sum, t2)
EXC(	STORE	t3, UNIT(3)(dst),	.Ls_exc)
	ADDC(sum, t3)
EXC(	STORE	t4, UNIT(4)(dst),	.Ls_exc)
	ADDC(sum, t4)
EXC(	STORE	t5, UNIT(5)(dst),	.Ls_exc)
	ADDC(sum, t5)
EXC(	STORE	t6, UNIT(6)(dst),	.Ls_exc)
	ADDC(sum, t6)
EXC(	STORE	t7, UNIT(7)(dst),	.Ls_exc)
	ADDC(sum, t7)
	.set	reorder				/* DADDI_WAR */
	ADD	dst, dst, 8*NBYTES
	bgez	len, 1b
	.set	noreorder
	ADD	len, 8*NBYTES		# revert len (see above)

	/*
	 * len == the number of bytes left to copy < 8*NBYTES
	 */
.Lcleanup_both_aligned:
#define rem t7
	beqz	len, .Ldone
	 sltu	t0, len, 4*NBYTES
	bnez	t0, .Lless_than_4units
	 and	rem, len, (NBYTES-1)	# rem = len % NBYTES
	/*
	 * len >= 4*NBYTES
	 */
EXC(	LOAD	t0, UNIT(0)(src),	.Ll_exc)
EXC(	LOAD	t1, UNIT(1)(src),	.Ll_exc_copy)
EXC(	LOAD	t2, UNIT(2)(src),	.Ll_exc_copy)
EXC(	LOAD	t3, UNIT(3)(src),	.Ll_exc_copy)
	SUB	len, len, 4*NBYTES
	ADD	src, src, 4*NBYTES
EXC(	STORE	t0, UNIT(0)(dst),	.Ls_exc)
	ADDC(sum, t0)
EXC(	STORE	t1, UNIT(1)(dst),	.Ls_exc)
	ADDC(sum, t1)
EXC(	STORE	t2, UNIT(2)(dst),	.Ls_exc)
	ADDC(sum, t2)
EXC(	STORE	t3, UNIT(3)(dst),	.Ls_exc)
	ADDC(sum, t3)
	.set	reorder				/* DADDI_WAR */
	ADD	dst, dst, 4*NBYTES
	beqz	len, .Ldone
	.set	noreorder
.Lless_than_4units:
	/*
	 * rem = len % NBYTES
	 */
	beq	rem, len, .Lcopy_bytes
	 nop
1:
EXC(	LOAD	t0, 0(src),		.Ll_exc)
	ADD	src, src, NBYTES
	SUB	len, len, NBYTES
EXC(	STORE	t0, 0(dst),		.Ls_exc)
	ADDC(sum, t0)
	.set	reorder				/* DADDI_WAR */
	ADD	dst, dst, NBYTES
	bne	rem, len, 1b
	.set	noreorder

	/*
	 * src and dst are aligned, need to copy rem bytes (rem < NBYTES)
	 * A loop would do only a byte at a time with possible branch
	 * mispredicts.  Can't do an explicit LOAD dst,mask,or,STORE
	 * because can't assume read-access to dst.  Instead, use
	 * STREST dst, which doesn't require read access to dst.
	 *
	 * This code should perform better than a simple loop on modern,
	 * wide-issue mips processors because the code has fewer branches and
	 * more instruction-level parallelism.
	 */
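	/*
	 * Worked example (illustrative, NBYTES == 4, one byte left):
	 * rem = 8 bits to keep, bits = 32 - 8 = 24 bits to discard.
	 * SHIFT_DISCARD clears the three byte lanes that must be neither
	 * stored nor summed, STREST writes just the surviving byte at
	 * dst, and SHIFT_DISCARD_REVERT moves that byte back to the lane
	 * matching its memory position before it enters the checksum.
	 */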
#define bits t2
	beqz	len, .Ldone
	 ADD	t1, dst, len	# t1 is just past last byte of dst
	li	bits, 8*NBYTES
	SLL	rem, len, 3	# rem = number of bits to keep
EXC(	LOAD	t0, 0(src),		.Ll_exc)
	SUB	bits, bits, rem	# bits = number of bits to discard
	SHIFT_DISCARD t0, t0, bits
EXC(	STREST	t0, -1(t1),		.Ls_exc)
	SHIFT_DISCARD_REVERT t0, t0, bits
	.set reorder
	ADDC(sum, t0)
	b	.Ldone
	.set noreorder
.Ldst_unaligned:
	/*
	 * dst is unaligned
	 * t0 = src & ADDRMASK
	 * t1 = dst & ADDRMASK; t1 > 0
	 * len >= NBYTES
	 *
	 * Copy enough bytes to align dst
	 * Set match = (src and dst have same alignment)
	 */
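	/*
	 * Example (NBYTES == 4): if dst & ADDRMASK == 1, then
	 * t2 = 4 - 1 = 3 bytes are copied here, after which dst is word
	 * aligned.  If src had the same misalignment, match is zero and
	 * the copy proceeds in .Lboth_aligned.
	 */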
#define match rem
EXC(	LDFIRST	t3, FIRST(0)(src),	.Ll_exc)
	ADD	t2, zero, NBYTES
EXC(	LDREST	t3, REST(0)(src),	.Ll_exc_copy)
	SUB	t2, t2, t1	# t2 = number of bytes copied
	xor	match, t0, t1
EXC(	STFIRST t3, FIRST(0)(dst),	.Ls_exc)
	SLL	t4, t1, 3		# t4 = number of bits to discard
	SHIFT_DISCARD t3, t3, t4
	/* no SHIFT_DISCARD_REVERT to handle odd buffer properly */
	ADDC(sum, t3)
	beq	len, t2, .Ldone
	 SUB	len, len, t2
	ADD	dst, dst, t2
	beqz	match, .Lboth_aligned
	 ADD	src, src, t2

.Lsrc_unaligned_dst_aligned:
	SRL	t0, len, LOG_NBYTES+2    # +2 for 4 units/iter
	beqz	t0, .Lcleanup_src_unaligned
	 and	rem, len, (4*NBYTES-1)   # rem = len % 4*NBYTES
1:
/*
 * Avoid consecutive LD*'s to the same register since some mips
 * implementations can't issue them in the same cycle.
 * It's OK to load FIRST(N+1) before REST(N) because the two addresses
 * are to the same unit (unless src is aligned, but it's not).
 */
EXC(	LDFIRST	t0, FIRST(0)(src),	.Ll_exc)
EXC(	LDFIRST	t1, FIRST(1)(src),	.Ll_exc_copy)
	SUB     len, len, 4*NBYTES
EXC(	LDREST	t0, REST(0)(src),	.Ll_exc_copy)
EXC(	LDREST	t1, REST(1)(src),	.Ll_exc_copy)
EXC(	LDFIRST	t2, FIRST(2)(src),	.Ll_exc_copy)
EXC(	LDFIRST	t3, FIRST(3)(src),	.Ll_exc_copy)
EXC(	LDREST	t2, REST(2)(src),	.Ll_exc_copy)
EXC(	LDREST	t3, REST(3)(src),	.Ll_exc_copy)
	ADD	src, src, 4*NBYTES
#ifdef CONFIG_CPU_SB1
	nop				# improves slotting
#endif
EXC(	STORE	t0, UNIT(0)(dst),	.Ls_exc)
	ADDC(sum, t0)
EXC(	STORE	t1, UNIT(1)(dst),	.Ls_exc)
	ADDC(sum, t1)
EXC(	STORE	t2, UNIT(2)(dst),	.Ls_exc)
	ADDC(sum, t2)
EXC(	STORE	t3, UNIT(3)(dst),	.Ls_exc)
	ADDC(sum, t3)
	.set	reorder				/* DADDI_WAR */
	ADD	dst, dst, 4*NBYTES
	bne	len, rem, 1b
	.set	noreorder

.Lcleanup_src_unaligned:
	beqz	len, .Ldone
	 and	rem, len, NBYTES-1  # rem = len % NBYTES
	beq	rem, len, .Lcopy_bytes
	 nop
1:
EXC(	LDFIRST t0, FIRST(0)(src),	.Ll_exc)
EXC(	LDREST	t0, REST(0)(src),	.Ll_exc_copy)
	ADD	src, src, NBYTES
	SUB	len, len, NBYTES
EXC(	STORE	t0, 0(dst),		.Ls_exc)
	ADDC(sum, t0)
	.set	reorder				/* DADDI_WAR */
	ADD	dst, dst, NBYTES
	bne	len, rem, 1b
	.set	noreorder

.Lcopy_bytes_checklen:
	beqz	len, .Ldone
	 nop
.Lcopy_bytes:
	/* 0 < len < NBYTES  */
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define SHIFT_START 0
#define SHIFT_INC 8
#else
#define SHIFT_START 8*(NBYTES-1)
#define SHIFT_INC -8
#endif
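/*
 * These shift constants place each byte in the lane it would occupy
 * in a whole-unit load, so the assembled partial word can be fed to
 * ADDC like any other unit.  Sketch of one step (illustrative): on
 * little-endian, byte i contributes t2 |= byte << (8 * i); on
 * big-endian the shift starts at the most significant byte and steps
 * downward.
 */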
	move	t2, zero	# partial word
	li	t3, SHIFT_START	# shift
/* use .Ll_exc_copy here to return correct sum on fault */
#define COPY_BYTE(N)			\
EXC(	lbu	t0, N(src), .Ll_exc_copy);	\
	SUB	len, len, 1;		\
EXC(	sb	t0, N(dst), .Ls_exc);	\
	SLLV	t0, t0, t3;		\
	addu	t3, SHIFT_INC;		\
	beqz	len, .Lcopy_bytes_done;	\
	 or	t2, t0

	COPY_BYTE(0)
	COPY_BYTE(1)
#ifdef USE_DOUBLE
	COPY_BYTE(2)
	COPY_BYTE(3)
	COPY_BYTE(4)
	COPY_BYTE(5)
#endif
EXC(	lbu	t0, NBYTES-2(src), .Ll_exc_copy)
	SUB	len, len, 1
EXC(	sb	t0, NBYTES-2(dst), .Ls_exc)
	SLLV	t0, t0, t3
	or	t2, t0
.Lcopy_bytes_done:
	ADDC(sum, t2)
.Ldone:
	/* fold checksum */
	.set	push
	.set	noat
#ifdef USE_DOUBLE
	dsll32	v1, sum, 0
	daddu	sum, v1
	sltu	v1, sum, v1
	dsra32	sum, sum, 0
	addu	sum, v1
#endif
	sll	v1, sum, 16
	addu	sum, v1
	sltu	v1, sum, v1
	srl	sum, sum, 16
	addu	sum, v1

	/* odd buffer alignment? */
	beqz	odd, 1f
	 nop
	sll	v1, sum, 8
	srl	sum, sum, 8
	or	sum, v1
	andi	sum, 0xffff
	.set	pop
1:
	.set reorder
	ADDC(sum, psum)
	jr	ra
	.set noreorder

.Ll_exc_copy:
	/*
	 * Copy bytes from src until faulting load address (or until a
	 * lb faults)
	 *
	 * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28)
	 * may be more than a byte beyond the last address.
	 * Hence, the lb below may get an exception.
	 *
	 * Assumes src < THREAD_BUADDR($28)
	 */
	LOAD	t0, TI_TASK($28)
	 li	t2, SHIFT_START
	LOAD	t0, THREAD_BUADDR(t0)
1:
EXC(	lbu	t1, 0(src),	.Ll_exc)
	ADD	src, src, 1
	sb	t1, 0(dst)	# can't fault -- we're copy_from_user
	SLLV	t1, t1, t2
	addu	t2, SHIFT_INC
	ADDC(sum, t1)
	.set	reorder				/* DADDI_WAR */
	ADD	dst, dst, 1
	bne	src, t0, 1b
	.set	noreorder
.Ll_exc:
	LOAD	t0, TI_TASK($28)
	 nop
	LOAD	t0, THREAD_BUADDR(t0)	# t0 is just past last good address
	 nop
	SUB	len, AT, t0		# len = number of uncopied bytes
	/*
	 * Here's where we rely on src and dst being incremented in tandem,
	 *   See (3) above.
	 * dst += (fault addr - src) to put dst at first byte to clear
	 */
	ADD	dst, t0			# compute start address in a1
	SUB	dst, src
	/*
	 * Clear len bytes starting at dst.  Can't call __bzero because it
	 * might modify len.  An inefficient loop for these rare times...
	 */
	.set	reorder				/* DADDI_WAR */
	SUB	src, len, 1
	beqz	len, .Ldone
	.set	noreorder
1:	sb	zero, 0(dst)
	ADD	dst, dst, 1
	.set	push
	.set	noat
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	bnez	src, 1b
	 SUB	src, src, 1
#else
	li	v1, 1
	bnez	src, 1b
	 SUB	src, src, v1
#endif
	li	v1, -EFAULT
	b	.Ldone
	 sw	v1, (errptr)

.Ls_exc:
	li	v0, -1 /* invalid checksum */
	li	v1, -EFAULT
	jr	ra
	 sw	v1, (errptr)
	.set	pop
	END(__csum_partial_copy_user)
757