/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Quick'n'dirty IP checksum ...
 *
 * Copyright (C) 1998, 1999 Ralf Baechle
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2007  Maciej W. Rozycki
 */
#include <linux/errno.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/regdef.h>

#ifdef CONFIG_64BIT
/*
 * As we are sharing the code base with the mips32 tree (which uses the o32
 * ABI register definitions), we need to redefine the registers from the n64
 * ABI naming to the o32 ABI naming.
 */
#undef t0
#undef t1
#undef t2
#undef t3
#define t0	$8
#define t1	$9
#define t2	$10
#define t3	$11
#define t4	$12
#define t5	$13
#define t6	$14
#define t7	$15

#define USE_DOUBLE
#endif

#ifdef USE_DOUBLE

#define LOAD   ld
#define LOAD32 lwu
#define ADD    daddu
#define NBYTES 8

#else

#define LOAD   lw
#define LOAD32 lw
#define ADD    addu
#define NBYTES 4

#endif /* USE_DOUBLE */

#define UNIT(unit)  ((unit)*NBYTES)

#define ADDC(sum,reg)						\
	.set	push;						\
	.set	noat;						\
	ADD	sum, reg;					\
	sltu	v1, sum, reg;					\
	ADD	sum, v1;					\
	.set	pop
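
/*
 * ADDC implements the end-around carry of the ones'-complement Internet
 * checksum (RFC 1071): sltu captures the carry out of the addition and
 * folds it back in.  One step is roughly this C (a sketch, not kernel
 * code):
 *
 *	sum += reg;
 *	if (sum < reg)		// unsigned wrap-around means a carry out
 *		sum++;		// end-around carry
 *
 * The noat push/pop keeps the assembler from expanding pseudo-ops
 * through $at while v1 is live ($at may be remapped to v1 under
 * CONFIG_CPU_DADDI_WORKAROUNDS; see .set at=v1 below).
 */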

#define ADDC32(sum,reg)						\
	.set	push;						\
	.set	noat;						\
	addu	sum, reg;					\
	sltu	v1, sum, reg;					\
	addu	sum, v1;					\
	.set	pop

#define CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3)	\
	LOAD	_t0, (offset + UNIT(0))(src);			\
	LOAD	_t1, (offset + UNIT(1))(src);			\
	LOAD	_t2, (offset + UNIT(2))(src);			\
	LOAD	_t3, (offset + UNIT(3))(src);			\
	ADDC(sum, _t0);						\
	ADDC(sum, _t1);						\
	ADDC(sum, _t2);						\
	ADDC(sum, _t3)

#ifdef USE_DOUBLE
#define CSUM_BIGCHUNK(src, offset, sum, _t0, _t1, _t2, _t3)	\
	CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3)
#else
#define CSUM_BIGCHUNK(src, offset, sum, _t0, _t1, _t2, _t3)	\
	CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3);	\
	CSUM_BIGCHUNK1(src, offset + 0x10, sum, _t0, _t1, _t2, _t3)
#endif
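
/*
 * Either way, CSUM_BIGCHUNK folds 32 bytes of src into sum per expansion:
 * four 8-byte loads in USE_DOUBLE mode, otherwise two back-to-back 16-byte
 * CSUM_BIGCHUNK1 groups of 4-byte loads.
 */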

/*
 * a0: source address
 * a1: length of the area to checksum
 * a2: partial checksum
 */
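
/*
 * In C terms the interface is roughly (a sketch; the kernel prototype
 * uses __wsum):
 *
 *	unsigned int csum_partial(const void *buf, int len,
 *				  unsigned int sum);
 *
 * The running ones'-complement sum is returned in v0.
 */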

#define src a0
#define sum v0

	.text
	.set	noreorder
	.align	5
LEAF(csum_partial)
	move	sum, zero
	move	t7, zero

	sltiu	t8, a1, 0x8
	bnez	t8, .Lsmall_csumcpy		/* < 8 bytes to copy */
	 move	t2, a1

	andi	t7, src, 0x1			/* odd buffer? */
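
	/*
	 * What follows is an alignment ladder: consume 1, 2, 4, 8 and 16
	 * leading bytes as needed so that src ends up 16-byte aligned for
	 * the unrolled loops, folding everything consumed into sum along
	 * the way.  An odd start address is remembered in t7: each
	 * subsequent byte then lands in the wrong half of its 16-bit word,
	 * which is undone by a byte swap of the folded sum at the end.
	 */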

.Lhword_align:
	beqz	t7, .Lword_align
	 andi	t8, src, 0x2

	lbu	t0, (src)
	LONG_SUBU	a1, a1, 0x1
#ifdef __MIPSEL__
	sll	t0, t0, 8
#endif
	ADDC(sum, t0)
	PTR_ADDU	src, src, 0x1
	andi	t8, src, 0x2

.Lword_align:
	beqz	t8, .Ldword_align
	 sltiu	t8, a1, 56

	lhu	t0, (src)
	LONG_SUBU	a1, a1, 0x2
	ADDC(sum, t0)
	sltiu	t8, a1, 56
	PTR_ADDU	src, src, 0x2

.Ldword_align:
	bnez	t8, .Ldo_end_words
	 move	t8, a1

	andi	t8, src, 0x4
	beqz	t8, .Lqword_align
	 andi	t8, src, 0x8

	LOAD32	t0, 0x00(src)
	LONG_SUBU	a1, a1, 0x4
	ADDC(sum, t0)
	PTR_ADDU	src, src, 0x4
	andi	t8, src, 0x8

.Lqword_align:
	beqz	t8, .Loword_align
	 andi	t8, src, 0x10

#ifdef USE_DOUBLE
	ld	t0, 0x00(src)
	LONG_SUBU	a1, a1, 0x8
	ADDC(sum, t0)
#else
	lw	t0, 0x00(src)
	lw	t1, 0x04(src)
	LONG_SUBU	a1, a1, 0x8
	ADDC(sum, t0)
	ADDC(sum, t1)
#endif
	PTR_ADDU	src, src, 0x8
	andi	t8, src, 0x10

.Loword_align:
	beqz	t8, .Lbegin_movement
	 LONG_SRL	t8, a1, 0x7

#ifdef USE_DOUBLE
	ld	t0, 0x00(src)
	ld	t1, 0x08(src)
	ADDC(sum, t0)
	ADDC(sum, t1)
#else
	CSUM_BIGCHUNK1(src, 0x00, sum, t0, t1, t3, t4)
#endif
	LONG_SUBU	a1, a1, 0x10
	PTR_ADDU	src, src, 0x10
	LONG_SRL	t8, a1, 0x7

.Lbegin_movement:
	beqz	t8, 1f
	 andi	t2, a1, 0x40

.Lmove_128bytes:
	CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
	CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4)
	CSUM_BIGCHUNK(src, 0x40, sum, t0, t1, t3, t4)
	CSUM_BIGCHUNK(src, 0x60, sum, t0, t1, t3, t4)
	LONG_SUBU	t8, t8, 0x01
	.set	reorder				/* DADDI_WAR */
	PTR_ADDU	src, src, 0x80
	bnez	t8, .Lmove_128bytes
	.set	noreorder

1:
	beqz	t2, 1f
	 andi	t2, a1, 0x20

.Lmove_64bytes:
	CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
	CSUM_BIGCHUNK(src, 0x20, sum, t0, t1, t3, t4)
	PTR_ADDU	src, src, 0x40

1:
	beqz	t2, .Ldo_end_words
	 andi	t8, a1, 0x1c

.Lmove_32bytes:
	CSUM_BIGCHUNK(src, 0x00, sum, t0, t1, t3, t4)
	andi	t8, a1, 0x1c
	PTR_ADDU	src, src, 0x20

.Ldo_end_words:
	beqz	t8, .Lsmall_csumcpy
	 andi	t2, a1, 0x3
	LONG_SRL	t8, t8, 0x2

.Lend_words:
	LOAD32	t0, (src)
	LONG_SUBU	t8, t8, 0x1
	ADDC(sum, t0)
	.set	reorder				/* DADDI_WAR */
	PTR_ADDU	src, src, 0x4
	bnez	t8, .Lend_words
	.set	noreorder

/* unknown src alignment and < 8 bytes to go  */
.Lsmall_csumcpy:
	move	a1, t2

	andi	t0, a1, 4
	beqz	t0, 1f
	 andi	t0, a1, 2

	/* Still a full word to go  */
	ulw	t1, (src)
	PTR_ADDIU	src, 4
#ifdef USE_DOUBLE
	dsll	t1, t1, 32			/* clear lower 32bit */
#endif
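	/*
	 * On 64-bit, ulw sign-extends, so without the dsll the upper half
	 * of t1 could be all ones and corrupt the 64-bit sum.  Shifting
	 * the word into the upper half discards the sign extension; the
	 * 64->32 fold below adds both halves together, so the word still
	 * contributes the same value to the checksum.
	 */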
	ADDC(sum, t1)

1:	move	t1, zero
	beqz	t0, 1f
	 andi	t0, a1, 1

	/* Still a halfword to go  */
	ulhu	t1, (src)
	PTR_ADDIU	src, 2

1:	beqz	t0, 1f
	 sll	t1, t1, 16

	lbu	t2, (src)
	 nop

#ifdef __MIPSEB__
	sll	t2, t2, 8
#endif
	or	t1, t2

1:	ADDC(sum, t1)

	/* fold checksum */
	.set	push
	.set	noat
#ifdef USE_DOUBLE
	dsll32	v1, sum, 0
	daddu	sum, v1
	sltu	v1, sum, v1
	dsra32	sum, sum, 0
	addu	sum, v1
#endif
	sll	v1, sum, 16
	addu	sum, v1
	sltu	v1, sum, v1
	srl	sum, sum, 16
	addu	sum, v1
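
	/*
	 * The fold above is the usual reduction of a ones'-complement
	 * accumulator, roughly this C sketch for the 32-bit half:
	 *
	 *	sum = (sum >> 16) + (sum & 0xffff);	// add the two halves
	 *	sum += (sum >> 16);			// fold any new carry
	 *
	 * (done here with the carry captured explicitly via sltu).  The
	 * byte swap that follows compensates for an odd start address
	 * recorded in t7: ones'-complement addition commutes with byte
	 * swapping, so swapping the folded result is equivalent to having
	 * swapped every 16-bit word of the input.
	 */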

	/* odd buffer alignment? */
	beqz	t7, 1f
	 nop
	sll	v1, sum, 8
	srl	sum, sum, 8
	or	sum, v1
	andi	sum, 0xffff
	.set	pop
1:
	.set	reorder
	/* Add the passed partial csum.  */
	ADDC32(sum, a2)
	jr	ra
	.set	noreorder
	END(csum_partial)


/*
 * checksum and copy routines based on memcpy.S
 *
 *	csum_partial_copy_nocheck(src, dst, len, sum)
 *	__csum_partial_copy_user(src, dst, len, sum, errp)
 *
 * See "Spec" in memcpy.S for details.  Unlike __copy_user, all
 * functions in this file use the standard calling convention.
 */
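
/*
 * Roughly, in C (a sketch; the kernel prototypes use __wsum):
 *
 *	unsigned int csum_partial_copy_nocheck(const void *src, void *dst,
 *					       int len, unsigned int sum);
 *	unsigned int __csum_partial_copy_user(const void *src, void *dst,
 *					      int len, unsigned int sum,
 *					      int *errp);
 *
 * Both copy len bytes from src to dst and return the updated checksum;
 * the _user variant additionally stores -EFAULT through errp on a fault.
 */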

#define src a0
#define dst a1
#define len a2
#define psum a3
#define sum v0
#define odd t8
#define errptr t9

/*
 * The exception handler for loads requires that:
 *  1- AT contains the address of the byte just past the end of the source
 *     of the copy,
 *  2- src_entry <= src < AT, and
 *  3- (dst - src) == (dst_entry - src_entry),
 * The _entry suffix denotes values when __copy_user was called.
 *
 * (1) is set up by __csum_partial_copy_from_user and maintained by
 *	not writing AT in __csum_partial_copy
 * (2) is met by incrementing src by the number of bytes copied
 * (3) is met by not doing loads between a pair of increments of dst and src
 *
 * The exception handlers for stores store -EFAULT to errptr and return.
 * These handlers do not need to overwrite any data.
 */

#define EXC(inst_reg,addr,handler)		\
9:	inst_reg, addr;				\
	.section __ex_table,"a";		\
	PTR	9b, handler;			\
	.previous
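
/*
 * EXC emits the access at local label 9 and records a fixup entry in the
 * __ex_table section pairing that instruction's address with its handler.
 * If the access faults on a user address, the kernel's exception fixup
 * looks the faulting PC up in this table and resumes at the handler
 * instead of killing the task.
 */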

#ifdef USE_DOUBLE

#define LOAD   ld
#define LOADL  ldl
#define LOADR  ldr
#define STOREL sdl
#define STORER sdr
#define STORE  sd
#define ADD    daddu
#define SUB    dsubu
#define SRL    dsrl
#define SLL    dsll
#define SLLV   dsllv
#define SRLV   dsrlv
#define NBYTES 8
#define LOG_NBYTES 3

#else

#define LOAD   lw
#define LOADL  lwl
#define LOADR  lwr
#define STOREL swl
#define STORER swr
#define STORE  sw
#define ADD    addu
#define SUB    subu
#define SRL    srl
#define SLL    sll
#define SLLV   sllv
#define SRLV   srlv
#define NBYTES 4
#define LOG_NBYTES 2

#endif /* USE_DOUBLE */

#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define LDFIRST LOADR
#define LDREST  LOADL
#define STFIRST STORER
#define STREST  STOREL
#define SHIFT_DISCARD SLLV
#define SHIFT_DISCARD_REVERT SRLV
#else
#define LDFIRST LOADL
#define LDREST  LOADR
#define STFIRST STOREL
#define STREST  STORER
#define SHIFT_DISCARD SRLV
#define SHIFT_DISCARD_REVERT SLLV
#endif

#define FIRST(unit) ((unit)*NBYTES)
#define REST(unit)  (FIRST(unit)+NBYTES-1)

#define ADDRMASK (NBYTES-1)
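
/*
 * LDFIRST/LDREST pair the unaligned-load instructions: LDFIRST fetches
 * the bytes at the start of unit N (address FIRST(N)) and LDREST the
 * remainder (its address, REST(N), points at the unit's last byte, as
 * lwl/lwr and ldl/ldr require).  Which of the two comes "first" depends
 * on endianness, hence the swapped definitions above.
 */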

#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	.set	noat
#else
	.set	at=v1
#endif
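
/*
 * With CONFIG_CPU_DADDI_WORKAROUNDS the address/arithmetic macros expand
 * to multi-instruction sequences that need an assembler temporary, so
 * noat cannot be used; AT duty is remapped to v1, which this code treats
 * as clobberable anyway.  Without the workarounds, noat ensures no macro
 * expansion silently uses $at.
 */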

LEAF(__csum_partial_copy_user)
	PTR_ADDU	AT, src, len	/* See (1) above. */
#ifdef CONFIG_64BIT
	move	errptr, a4
#else
	lw	errptr, 16(sp)
#endif
FEXPORT(csum_partial_copy_nocheck)
	move	sum, zero
	move	odd, zero
	/*
	 * Note: dst & src may be unaligned, len may be 0.
	 *
	 * The "issue break"s below are very approximate.
	 * Issue delays for dcache fills will perturb the schedule, as will
	 * load queue full replay traps, etc.
	 *
	 * If len < NBYTES use byte operations.
	 */
	sltu	t2, len, NBYTES
	and	t1, dst, ADDRMASK
	bnez	t2, .Lcopy_bytes_checklen
	 and	t0, src, ADDRMASK
	andi	odd, dst, 0x1			/* odd buffer? */
	bnez	t1, .Ldst_unaligned
	 nop
	bnez	t0, .Lsrc_unaligned_dst_aligned
	/*
	 * use delay slot for fall-through
	 * src and dst are aligned; need to compute rem
	 */
.Lboth_aligned:
	 SRL	t0, len, LOG_NBYTES+3    # +3 for 8 units/iter
	beqz	t0, .Lcleanup_both_aligned # len < 8*NBYTES
	 nop
	SUB	len, 8*NBYTES		# subtract here for bgez loop
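	/*
	 * len is biased by -8*NBYTES so the main loop can close with a
	 * plain bgez (len >= 0 means at least one more full pass) instead
	 * of a separate compare; the bias is reverted after the loop.
	 */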
	.align	4
1:
EXC(	LOAD	t0, UNIT(0)(src),	.Ll_exc)
EXC(	LOAD	t1, UNIT(1)(src),	.Ll_exc_copy)
EXC(	LOAD	t2, UNIT(2)(src),	.Ll_exc_copy)
EXC(	LOAD	t3, UNIT(3)(src),	.Ll_exc_copy)
EXC(	LOAD	t4, UNIT(4)(src),	.Ll_exc_copy)
EXC(	LOAD	t5, UNIT(5)(src),	.Ll_exc_copy)
EXC(	LOAD	t6, UNIT(6)(src),	.Ll_exc_copy)
EXC(	LOAD	t7, UNIT(7)(src),	.Ll_exc_copy)
	SUB	len, len, 8*NBYTES
	ADD	src, src, 8*NBYTES
EXC(	STORE	t0, UNIT(0)(dst),	.Ls_exc)
	ADDC(sum, t0)
EXC(	STORE	t1, UNIT(1)(dst),	.Ls_exc)
	ADDC(sum, t1)
EXC(	STORE	t2, UNIT(2)(dst),	.Ls_exc)
	ADDC(sum, t2)
EXC(	STORE	t3, UNIT(3)(dst),	.Ls_exc)
	ADDC(sum, t3)
EXC(	STORE	t4, UNIT(4)(dst),	.Ls_exc)
	ADDC(sum, t4)
EXC(	STORE	t5, UNIT(5)(dst),	.Ls_exc)
	ADDC(sum, t5)
EXC(	STORE	t6, UNIT(6)(dst),	.Ls_exc)
	ADDC(sum, t6)
EXC(	STORE	t7, UNIT(7)(dst),	.Ls_exc)
	ADDC(sum, t7)
	.set	reorder				/* DADDI_WAR */
	ADD	dst, dst, 8*NBYTES
	bgez	len, 1b
	.set	noreorder
	ADD	len, 8*NBYTES		# revert len (see above)

	/*
	 * len == the number of bytes left to copy < 8*NBYTES
	 */
.Lcleanup_both_aligned:
#define rem t7
	beqz	len, .Ldone
	 sltu	t0, len, 4*NBYTES
	bnez	t0, .Lless_than_4units
	 and	rem, len, (NBYTES-1)	# rem = len % NBYTES
	/*
	 * len >= 4*NBYTES
	 */
EXC(	LOAD	t0, UNIT(0)(src),	.Ll_exc)
EXC(	LOAD	t1, UNIT(1)(src),	.Ll_exc_copy)
EXC(	LOAD	t2, UNIT(2)(src),	.Ll_exc_copy)
EXC(	LOAD	t3, UNIT(3)(src),	.Ll_exc_copy)
	SUB	len, len, 4*NBYTES
	ADD	src, src, 4*NBYTES
EXC(	STORE	t0, UNIT(0)(dst),	.Ls_exc)
	ADDC(sum, t0)
EXC(	STORE	t1, UNIT(1)(dst),	.Ls_exc)
	ADDC(sum, t1)
EXC(	STORE	t2, UNIT(2)(dst),	.Ls_exc)
	ADDC(sum, t2)
EXC(	STORE	t3, UNIT(3)(dst),	.Ls_exc)
	ADDC(sum, t3)
	.set	reorder				/* DADDI_WAR */
	ADD	dst, dst, 4*NBYTES
	beqz	len, .Ldone
	.set	noreorder
.Lless_than_4units:
	/*
	 * rem = len % NBYTES
	 */
	beq	rem, len, .Lcopy_bytes
	 nop
1:
EXC(	LOAD	t0, 0(src),		.Ll_exc)
	ADD	src, src, NBYTES
	SUB	len, len, NBYTES
EXC(	STORE	t0, 0(dst),		.Ls_exc)
	ADDC(sum, t0)
	.set	reorder				/* DADDI_WAR */
	ADD	dst, dst, NBYTES
	bne	rem, len, 1b
	.set	noreorder

	/*
	 * src and dst are aligned, need to copy rem bytes (rem < NBYTES)
	 * A loop would do only a byte at a time with possible branch
	 * mispredicts.  Can't do an explicit LOAD dst,mask,or,STORE
	 * because can't assume read-access to dst.  Instead, use
	 * STREST dst, which doesn't require read access to dst.
	 *
	 * This code should perform better than a simple loop on modern,
	 * wide-issue mips processors because the code has fewer branches and
	 * more instruction-level parallelism.
	 */
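	/*
	 * Worked example (32-bit, big-endian, 3 trailing bytes): the full
	 * word at src is loaded, rem == 24 bits are kept, so bits == 8 low
	 * bits are discarded by SHIFT_DISCARD.  STREST then stores only
	 * the 3 bytes ending at t1-1 (the last byte of dst), and
	 * SHIFT_DISCARD_REVERT moves the kept bytes back to the positions
	 * they occupy in memory before they are folded into sum.
	 */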
#define bits t2
	beqz	len, .Ldone
	 ADD	t1, dst, len	# t1 is just past last byte of dst
	li	bits, 8*NBYTES
	SLL	rem, len, 3	# rem = number of bits to keep
EXC(	LOAD	t0, 0(src),		.Ll_exc)
	SUB	bits, bits, rem	# bits = number of bits to discard
	SHIFT_DISCARD t0, t0, bits
EXC(	STREST	t0, -1(t1),		.Ls_exc)
	SHIFT_DISCARD_REVERT t0, t0, bits
	.set reorder
	ADDC(sum, t0)
	b	.Ldone
	.set noreorder
.Ldst_unaligned:
	/*
	 * dst is unaligned
	 * t0 = src & ADDRMASK
	 * t1 = dst & ADDRMASK; t1 > 0
	 * len >= NBYTES
	 *
	 * Copy enough bytes to align dst
	 * Set match = (src and dst have same alignment)
	 */
#define match rem
EXC(	LDFIRST	t3, FIRST(0)(src),	.Ll_exc)
	ADD	t2, zero, NBYTES
EXC(	LDREST	t3, REST(0)(src),	.Ll_exc_copy)
	SUB	t2, t2, t1	# t2 = number of bytes copied
	xor	match, t0, t1
EXC(	STFIRST t3, FIRST(0)(dst),	.Ls_exc)
	SLL	t4, t1, 3		# t4 = number of bits to discard
	SHIFT_DISCARD t3, t3, t4
	/* no SHIFT_DISCARD_REVERT to handle odd buffer properly */
	ADDC(sum, t3)
	beq	len, t2, .Ldone
	 SUB	len, len, t2
	ADD	dst, dst, t2
	beqz	match, .Lboth_aligned
	 ADD	src, src, t2

.Lsrc_unaligned_dst_aligned:
	SRL	t0, len, LOG_NBYTES+2    # +2 for 4 units/iter
	beqz	t0, .Lcleanup_src_unaligned
	 and	rem, len, (4*NBYTES-1)   # rem = len % 4*NBYTES
1:
/*
 * Avoid consecutive LD*'s to the same register since some mips
 * implementations can't issue them in the same cycle.
 * It's OK to load FIRST(N+1) before REST(N) because the two addresses
 * are to the same unit (unless src is aligned, but it's not).
 */
EXC(	LDFIRST	t0, FIRST(0)(src),	.Ll_exc)
EXC(	LDFIRST	t1, FIRST(1)(src),	.Ll_exc_copy)
	SUB     len, len, 4*NBYTES
EXC(	LDREST	t0, REST(0)(src),	.Ll_exc_copy)
EXC(	LDREST	t1, REST(1)(src),	.Ll_exc_copy)
EXC(	LDFIRST	t2, FIRST(2)(src),	.Ll_exc_copy)
EXC(	LDFIRST	t3, FIRST(3)(src),	.Ll_exc_copy)
EXC(	LDREST	t2, REST(2)(src),	.Ll_exc_copy)
EXC(	LDREST	t3, REST(3)(src),	.Ll_exc_copy)
	ADD	src, src, 4*NBYTES
#ifdef CONFIG_CPU_SB1
	nop				# improves slotting
#endif
EXC(	STORE	t0, UNIT(0)(dst),	.Ls_exc)
	ADDC(sum, t0)
EXC(	STORE	t1, UNIT(1)(dst),	.Ls_exc)
	ADDC(sum, t1)
EXC(	STORE	t2, UNIT(2)(dst),	.Ls_exc)
	ADDC(sum, t2)
EXC(	STORE	t3, UNIT(3)(dst),	.Ls_exc)
	ADDC(sum, t3)
	.set	reorder				/* DADDI_WAR */
	ADD	dst, dst, 4*NBYTES
	bne	len, rem, 1b
	.set	noreorder

.Lcleanup_src_unaligned:
	beqz	len, .Ldone
	 and	rem, len, NBYTES-1  # rem = len % NBYTES
	beq	rem, len, .Lcopy_bytes
	 nop
1:
EXC(	LDFIRST t0, FIRST(0)(src),	.Ll_exc)
EXC(	LDREST	t0, REST(0)(src),	.Ll_exc_copy)
	ADD	src, src, NBYTES
	SUB	len, len, NBYTES
EXC(	STORE	t0, 0(dst),		.Ls_exc)
	ADDC(sum, t0)
	.set	reorder				/* DADDI_WAR */
	ADD	dst, dst, NBYTES
	bne	len, rem, 1b
	.set	noreorder

.Lcopy_bytes_checklen:
	beqz	len, .Ldone
	 nop
.Lcopy_bytes:
	/* 0 < len < NBYTES  */
#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define SHIFT_START 0
#define SHIFT_INC 8
#else
#define SHIFT_START 8*(NBYTES-1)
#define SHIFT_INC -8
#endif
	move	t2, zero	# partial word
	li	t3, SHIFT_START	# shift
/* use .Ll_exc_copy here to return correct sum on fault */
#define COPY_BYTE(N)			\
EXC(	lbu	t0, N(src), .Ll_exc_copy);	\
	SUB	len, len, 1;		\
EXC(	sb	t0, N(dst), .Ls_exc);	\
	SLLV	t0, t0, t3;		\
	addu	t3, SHIFT_INC;		\
	beqz	len, .Lcopy_bytes_done;	\
	 or	t2, t0
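
/*
 * COPY_BYTE reassembles the copied bytes into the partial word t2 at the
 * positions a whole-word load would have given them, so the final
 * ADDC(sum, t2) is checksum-equivalent to having summed a full word.
 * Roughly, in C, for each byte i (little-endian sketch):
 *
 *	t2 |= (unsigned)src[i] << (8 * i);
 */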

	COPY_BYTE(0)
	COPY_BYTE(1)
#ifdef USE_DOUBLE
	COPY_BYTE(2)
	COPY_BYTE(3)
	COPY_BYTE(4)
	COPY_BYTE(5)
#endif
EXC(	lbu	t0, NBYTES-2(src), .Ll_exc_copy)
	SUB	len, len, 1
EXC(	sb	t0, NBYTES-2(dst), .Ls_exc)
	SLLV	t0, t0, t3
	or	t2, t0
.Lcopy_bytes_done:
	ADDC(sum, t2)
.Ldone:
	/* fold checksum */
	.set	push
	.set	noat
#ifdef USE_DOUBLE
	dsll32	v1, sum, 0
	daddu	sum, v1
	sltu	v1, sum, v1
	dsra32	sum, sum, 0
	addu	sum, v1
#endif
	sll	v1, sum, 16
	addu	sum, v1
	sltu	v1, sum, v1
	srl	sum, sum, 16
	addu	sum, v1

	/* odd buffer alignment? */
	beqz	odd, 1f
	 nop
	sll	v1, sum, 8
	srl	sum, sum, 8
	or	sum, v1
	andi	sum, 0xffff
	.set	pop
1:
	.set reorder
	ADDC32(sum, psum)
	jr	ra
	.set noreorder

.Ll_exc_copy:
	/*
	 * Copy bytes from src until faulting load address (or until a
	 * lb faults)
	 *
	 * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28)
	 * may be more than a byte beyond the last address.
	 * Hence, the lb below may get an exception.
	 *
	 * Assumes src < THREAD_BUADDR($28)
	 */
	LOAD	t0, TI_TASK($28)
	 li	t2, SHIFT_START
	LOAD	t0, THREAD_BUADDR(t0)
1:
EXC(	lbu	t1, 0(src),	.Ll_exc)
	ADD	src, src, 1
	sb	t1, 0(dst)	# can't fault -- we're copy_from_user
	SLLV	t1, t1, t2
	addu	t2, SHIFT_INC
	ADDC(sum, t1)
	.set	reorder				/* DADDI_WAR */
	ADD	dst, dst, 1
	bne	src, t0, 1b
	.set	noreorder
.Ll_exc:
	LOAD	t0, TI_TASK($28)
	 nop
	LOAD	t0, THREAD_BUADDR(t0)	# t0 is just past last good address
	 nop
	SUB	len, AT, t0		# len = number of uncopied bytes
	/*
	 * Here's where we rely on src and dst being incremented in tandem,
	 *   See (3) above.
	 * dst += (fault addr - src) to put dst at first byte to clear
	 */
	ADD	dst, t0			# compute start address in a1
	SUB	dst, src
	/*
	 * Clear len bytes starting at dst.  Can't call __bzero because it
	 * might modify len.  An inefficient loop for these rare times...
	 */
	.set	reorder				/* DADDI_WAR */
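	/*
	 * src is dead at this point, so reuse it as the count of bytes
	 * still to clear.
	 */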
	SUB	src, len, 1
	beqz	len, .Ldone
	.set	noreorder
1:	sb	zero, 0(dst)
	ADD	dst, dst, 1
	.set	push
	.set	noat
#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
	bnez	src, 1b
	 SUB	src, src, 1
#else
	li	v1, 1
	bnez	src, 1b
	 SUB	src, src, v1
#endif
	li	v1, -EFAULT
	b	.Ldone
	 sw	v1, (errptr)

.Ls_exc:
	li	v0, -1 /* invalid checksum */
	li	v1, -EFAULT
	jr	ra
	 sw	v1, (errptr)
	.set	pop
	END(__csum_partial_copy_user)
