/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Unified implementation of memcpy, memmove and the __copy_user backend.
 *
 * Copyright (C) 1998, 99, 2000, 01, 2002 Ralf Baechle (ralf@gnu.org)
 * Copyright (C) 1999, 2000, 01, 2002 Silicon Graphics, Inc.
 * Copyright (C) 2002 Broadcom, Inc.
 *   memcpy/copy_user author: Mark Vandevoorde
 *
 * Mnemonic names for arguments to memcpy/__copy_user
 */

#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/export.h>
#include <asm/regdef.h>

#define dst a0
#define src a1
#define len a2

/*
 * Spec
 *
 * memcpy copies len bytes from src to dst and sets v0 to dst.
 * It assumes that
 *   - src and dst don't overlap
 *   - src is readable
 *   - dst is writable
 * memcpy uses the standard calling convention
 *
 * __copy_user copies up to len bytes from src to dst and sets a2 (len) to
 * the number of uncopied bytes due to an exception caused by a read or write.
 * __copy_user assumes that src and dst don't overlap, and that the call is
 * implementing one of the following:
 *   copy_to_user
 *     - src is readable  (no exceptions when reading src)
 *   copy_from_user
 *     - dst is writable  (no exceptions when writing dst)
 * __copy_user uses a non-standard calling convention; see
 * arch/mips/include/asm/uaccess.h
 *
 * When an exception happens on a load, the handler must
 * ensure that all of the destination buffer is overwritten to prevent
 * leaking information to user mode programs.
 */
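
/*
 * Note: callers retrieve __copy_user's result from the len register
 * (a2); see the wrappers in arch/mips/include/asm/uaccess.h.
 */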

/*
 * Implementation
 */

/*
 * The exception handler for loads requires that:
 *  1- AT contain the address of the byte just past the end of the source
 *     of the copy,
 *  2- src_entry <= src < AT, and
 *  3- (dst - src) == (dst_entry - src_entry).
 * The _entry suffix denotes values when __copy_user was called.
 *
 * (1) is set up by uaccess.h and maintained by not writing AT in copy_user
 * (2) is met by incrementing src by the number of bytes copied
 * (3) is met by not doing loads between a pair of increments of dst and src
 *
 * The exception handlers for stores adjust len (if necessary) and return.
 * These handlers do not need to overwrite any data.
 *
 * For __rmemcpy and memmove an exception is always a kernel bug, therefore
 * they're not protected.
 */

#define EXC(inst_reg,addr,handler)		\
9:	inst_reg, addr;				\
	.section __ex_table,"a";		\
	PTR	9b, handler;			\
	.previous
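
/*
 * For example, EXC(LOAD t0, UNIT(0)(src), l_exc) emits the load at a
 * local label 9: and adds a __ex_table entry pairing that address with
 * the l_exc fixup, so a fault on that load vectors to l_exc.
 */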

/*
 * Only on a 64-bit kernel can we make use of 64-bit registers.
 */

#define LOAD   ld
#define LOADL  ldl
#define LOADR  ldr
#define STOREL sdl
#define STORER sdr
#define STORE  sd
#define ADD    daddu
#define SUB    dsubu
#define SRL    dsrl
#define SRA    dsra
#define SLL    dsll
#define SLLV   dsllv
#define SRLV   dsrlv
#define NBYTES 8
#define LOG_NBYTES 3

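/*
 * NBYTES is the copy unit: LOAD/STORE are ld/sd, so each unit is an
 * 8-byte doubleword and LOG_NBYTES == 3 (8 == 1 << 3).
 */
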
/*
 * As we are sharing the code base with the mips32 tree (which uses the
 * o32 ABI register definitions), we need to redefine the register
 * definitions from the n64 ABI register naming to the o32 ABI register
 * naming.
 */
#undef t0
#undef t1
#undef t2
#undef t3
#define t0	$8
#define t1	$9
#define t2	$10
#define t3	$11
#define t4	$12
#define t5	$13
#define t6	$14
#define t7	$15

#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define LDFIRST LOADR
#define LDREST	LOADL
#define STFIRST STORER
#define STREST	STOREL
#define SHIFT_DISCARD SLLV
#else
#define LDFIRST LOADL
#define LDREST	LOADR
#define STFIRST STOREL
#define STREST	STORER
#define SHIFT_DISCARD SRLV
#endif

#define FIRST(unit) ((unit)*NBYTES)
#define REST(unit)  (FIRST(unit)+NBYTES-1)
#define UNIT(unit)  FIRST(unit)

#define ADDRMASK (NBYTES-1)
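
/*
 * An unaligned doubleword at unit N is fetched with the pair
 *
 *	LDFIRST	t, FIRST(N)(src)
 *	LDREST	t, REST(N)(src)
 *
 * i.e. ldl/ldr (big-endian) or ldr/ldl (little-endian) at offsets
 * N*NBYTES and N*NBYTES + 7, merging the two partial loads into t.
 */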

	.text
	.set	noreorder
	.set	noat

/*
 * t7 is used as a flag to note inatomic mode.  The "li t7, 1" below
 * executes in the branch delay slot (.set noreorder), so the flag is
 * set before __copy_user_common begins.
 */
LEAF(__copy_user_inatomic)
EXPORT_SYMBOL(__copy_user_inatomic)
	b	__copy_user_common
	 li	t7, 1
	END(__copy_user_inatomic)

/*
 * A combined memcpy/__copy_user
 * __copy_user sets len to 0 for success; else to an upper bound of
 * the number of uncopied bytes.
 * memcpy sets v0 to dst.
 */
	.align	5
LEAF(memcpy)					/* a0=dst a1=src a2=len */
EXPORT_SYMBOL(memcpy)
	move	v0, dst				/* return value */
__memcpy:
FEXPORT(__copy_user)
EXPORT_SYMBOL(__copy_user)
	li	t7, 0				/* not inatomic */
__copy_user_common:
	/*
	 * Note: dst & src may be unaligned, len may be 0
	 */
	#
	# Octeon doesn't care if the destination is unaligned. The hardware
	# can fix it faster than we can special-case the assembly.
	#
	pref	0, 0(src)
	sltu	t0, len, NBYTES		# Check if len < 1*NBYTES
	bnez	t0, copy_bytes_checklen
	 and	t0, src, ADDRMASK	# Check if src unaligned
	bnez	t0, src_unaligned
	 sltu	t0, len, 4*NBYTES	# Check if len < 4*NBYTES
	bnez	t0, less_than_4units
	 sltu	t0, len, 8*NBYTES	# Check if len < 8*NBYTES
	bnez	t0, less_than_8units
	 sltu	t0, len, 16*NBYTES	# Check if len < 16*NBYTES
	bnez	t0, cleanup_both_aligned
	 sltu	t0, len, 128+1		# Check if len < 129
	bnez	t0, 1f			# Skip prefetch if len is too short
	 sltu	t0, len, 256+1		# Check if len < 257
	bnez	t0, 1f			# Skip prefetch if len is too short
	 pref	0, 128(src)		# We must not prefetch invalid addresses
	#
	# This is where we loop if there are more than 128 bytes left
2:	pref	0, 256(src)		# We must not prefetch invalid addresses
	#
	# This is where we loop if we can't prefetch anymore
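	#
	# Each pass through the loop below copies 16*NBYTES (128) bytes.
	# src and dst are advanced in the middle of the block, so its
	# second half uses negative unit offsets; load faults there go
	# through l_exc_copy_rewind16 to undo that advance first.
	#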
1:
EXC(	LOAD	t0, UNIT(0)(src),	l_exc)
EXC(	LOAD	t1, UNIT(1)(src),	l_exc_copy)
EXC(	LOAD	t2, UNIT(2)(src),	l_exc_copy)
EXC(	LOAD	t3, UNIT(3)(src),	l_exc_copy)
	SUB	len, len, 16*NBYTES
EXC(	STORE	t0, UNIT(0)(dst),	s_exc_p16u)
EXC(	STORE	t1, UNIT(1)(dst),	s_exc_p15u)
EXC(	STORE	t2, UNIT(2)(dst),	s_exc_p14u)
EXC(	STORE	t3, UNIT(3)(dst),	s_exc_p13u)
EXC(	LOAD	t0, UNIT(4)(src),	l_exc_copy)
EXC(	LOAD	t1, UNIT(5)(src),	l_exc_copy)
EXC(	LOAD	t2, UNIT(6)(src),	l_exc_copy)
EXC(	LOAD	t3, UNIT(7)(src),	l_exc_copy)
EXC(	STORE	t0, UNIT(4)(dst),	s_exc_p12u)
EXC(	STORE	t1, UNIT(5)(dst),	s_exc_p11u)
EXC(	STORE	t2, UNIT(6)(dst),	s_exc_p10u)
	ADD	src, src, 16*NBYTES
EXC(	STORE	t3, UNIT(7)(dst),	s_exc_p9u)
	ADD	dst, dst, 16*NBYTES
EXC(	LOAD	t0, UNIT(-8)(src),	l_exc_copy_rewind16)
EXC(	LOAD	t1, UNIT(-7)(src),	l_exc_copy_rewind16)
EXC(	LOAD	t2, UNIT(-6)(src),	l_exc_copy_rewind16)
EXC(	LOAD	t3, UNIT(-5)(src),	l_exc_copy_rewind16)
EXC(	STORE	t0, UNIT(-8)(dst),	s_exc_p8u)
EXC(	STORE	t1, UNIT(-7)(dst),	s_exc_p7u)
EXC(	STORE	t2, UNIT(-6)(dst),	s_exc_p6u)
EXC(	STORE	t3, UNIT(-5)(dst),	s_exc_p5u)
EXC(	LOAD	t0, UNIT(-4)(src),	l_exc_copy_rewind16)
EXC(	LOAD	t1, UNIT(-3)(src),	l_exc_copy_rewind16)
EXC(	LOAD	t2, UNIT(-2)(src),	l_exc_copy_rewind16)
EXC(	LOAD	t3, UNIT(-1)(src),	l_exc_copy_rewind16)
EXC(	STORE	t0, UNIT(-4)(dst),	s_exc_p4u)
EXC(	STORE	t1, UNIT(-3)(dst),	s_exc_p3u)
EXC(	STORE	t2, UNIT(-2)(dst),	s_exc_p2u)
EXC(	STORE	t3, UNIT(-1)(dst),	s_exc_p1u)
	sltu	t0, len, 256+1		# See if we can prefetch more
	beqz	t0, 2b
	 sltu	t0, len, 128		# See if we can loop once more
	beqz	t0, 1b
	 nop
	#
	# Jump here if there are less than 16*NBYTES left.
	#
cleanup_both_aligned:
	beqz	len, done
	 sltu	t0, len, 8*NBYTES
	bnez	t0, less_than_8units
	 nop
EXC(	LOAD	t0, UNIT(0)(src),	l_exc)
EXC(	LOAD	t1, UNIT(1)(src),	l_exc_copy)
EXC(	LOAD	t2, UNIT(2)(src),	l_exc_copy)
EXC(	LOAD	t3, UNIT(3)(src),	l_exc_copy)
	SUB	len, len, 8*NBYTES
EXC(	STORE	t0, UNIT(0)(dst),	s_exc_p8u)
EXC(	STORE	t1, UNIT(1)(dst),	s_exc_p7u)
EXC(	STORE	t2, UNIT(2)(dst),	s_exc_p6u)
EXC(	STORE	t3, UNIT(3)(dst),	s_exc_p5u)
EXC(	LOAD	t0, UNIT(4)(src),	l_exc_copy)
EXC(	LOAD	t1, UNIT(5)(src),	l_exc_copy)
EXC(	LOAD	t2, UNIT(6)(src),	l_exc_copy)
EXC(	LOAD	t3, UNIT(7)(src),	l_exc_copy)
EXC(	STORE	t0, UNIT(4)(dst),	s_exc_p4u)
EXC(	STORE	t1, UNIT(5)(dst),	s_exc_p3u)
EXC(	STORE	t2, UNIT(6)(dst),	s_exc_p2u)
EXC(	STORE	t3, UNIT(7)(dst),	s_exc_p1u)
	ADD	src, src, 8*NBYTES
	beqz	len, done
	 ADD	dst, dst, 8*NBYTES
	#
	# Jump here if there are less than 8*NBYTES left.
	#
less_than_8units:
	sltu	t0, len, 4*NBYTES
	bnez	t0, less_than_4units
	 nop
EXC(	LOAD	t0, UNIT(0)(src),	l_exc)
EXC(	LOAD	t1, UNIT(1)(src),	l_exc_copy)
EXC(	LOAD	t2, UNIT(2)(src),	l_exc_copy)
EXC(	LOAD	t3, UNIT(3)(src),	l_exc_copy)
	SUB	len, len, 4*NBYTES
EXC(	STORE	t0, UNIT(0)(dst),	s_exc_p4u)
EXC(	STORE	t1, UNIT(1)(dst),	s_exc_p3u)
EXC(	STORE	t2, UNIT(2)(dst),	s_exc_p2u)
EXC(	STORE	t3, UNIT(3)(dst),	s_exc_p1u)
	ADD	src, src, 4*NBYTES
	beqz	len, done
	 ADD	dst, dst, 4*NBYTES
	#
	# Jump here if there are less than 4*NBYTES left. This means
	# we may need to copy up to three NBYTES-wide words.
	#
less_than_4units:
	sltu	t0, len, 1*NBYTES
	bnez	t0, copy_bytes_checklen
	 nop
	#
	# 1) Copy NBYTES, then check length again
	#
EXC(	LOAD	t0, 0(src),		l_exc)
	SUB	len, len, NBYTES
	sltu	t1, len, 8
EXC(	STORE	t0, 0(dst),		s_exc_p1u)
	ADD	src, src, NBYTES
	bnez	t1, copy_bytes_checklen
	 ADD	dst, dst, NBYTES
	#
	# 2) Copy NBYTES, then check length again
	#
EXC(	LOAD	t0, 0(src),		l_exc)
	SUB	len, len, NBYTES
	sltu	t1, len, 8
EXC(	STORE	t0, 0(dst),		s_exc_p1u)
	ADD	src, src, NBYTES
	bnez	t1, copy_bytes_checklen
	 ADD	dst, dst, NBYTES
	#
	# 3) Copy NBYTES, then check length again
	#
EXC(	LOAD	t0, 0(src),		l_exc)
	SUB	len, len, NBYTES
	ADD	src, src, NBYTES
	ADD	dst, dst, NBYTES
	b copy_bytes_checklen
EXC(	 STORE	t0, -8(dst),		s_exc_p1u)

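	#
	# src is not doubleword aligned.  Since Octeon handles unaligned
	# destinations in hardware (see the note above), only the loads
	# need the LDFIRST/LDREST pairs; the stores remain full STOREs.
	#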
src_unaligned:
#define rem t8
	SRL	t0, len, LOG_NBYTES+2	 # +2 for 4 units/iter
	beqz	t0, cleanup_src_unaligned
	 and	rem, len, (4*NBYTES-1)	 # rem = len % 4*NBYTES
1:
/*
 * Avoid consecutive LD*'s to the same register since some mips
 * implementations can't issue them in the same cycle.
 * It's OK to load FIRST(N+1) before REST(N) because the two addresses
 * are to the same unit (unless src is aligned, but it's not).
 */
EXC(	LDFIRST t0, FIRST(0)(src),	l_exc)
EXC(	LDFIRST t1, FIRST(1)(src),	l_exc_copy)
	SUB	len, len, 4*NBYTES
EXC(	LDREST	t0, REST(0)(src),	l_exc_copy)
EXC(	LDREST	t1, REST(1)(src),	l_exc_copy)
EXC(	LDFIRST t2, FIRST(2)(src),	l_exc_copy)
EXC(	LDFIRST t3, FIRST(3)(src),	l_exc_copy)
EXC(	LDREST	t2, REST(2)(src),	l_exc_copy)
EXC(	LDREST	t3, REST(3)(src),	l_exc_copy)
	ADD	src, src, 4*NBYTES
EXC(	STORE	t0, UNIT(0)(dst),	s_exc_p4u)
EXC(	STORE	t1, UNIT(1)(dst),	s_exc_p3u)
EXC(	STORE	t2, UNIT(2)(dst),	s_exc_p2u)
EXC(	STORE	t3, UNIT(3)(dst),	s_exc_p1u)
	bne	len, rem, 1b
	 ADD	dst, dst, 4*NBYTES

cleanup_src_unaligned:
	beqz	len, done
	 and	rem, len, NBYTES-1  # rem = len % NBYTES
	beq	rem, len, copy_bytes
	 nop
1:
EXC(	LDFIRST t0, FIRST(0)(src),	l_exc)
EXC(	LDREST	t0, REST(0)(src),	l_exc_copy)
	SUB	len, len, NBYTES
EXC(	STORE	t0, 0(dst),		s_exc_p1u)
	ADD	src, src, NBYTES
	bne	len, rem, 1b
	 ADD	dst, dst, NBYTES

copy_bytes_checklen:
	beqz	len, done
	 nop
copy_bytes:
	/* 0 < len < NBYTES  */
#define COPY_BYTE(N)			\
EXC(	lb	t0, N(src), l_exc);	\
	SUB	len, len, 1;		\
	beqz	len, done;		\
EXC(	 sb	t0, N(dst), s_exc_p1)

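	/*
	 * Note: the sb in COPY_BYTE sits in the beqz delay slot
	 * (.set noreorder), so the byte is stored even on the
	 * iteration that branches to done.
	 */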
	COPY_BYTE(0)
	COPY_BYTE(1)
	COPY_BYTE(2)
	COPY_BYTE(3)
	COPY_BYTE(4)
	COPY_BYTE(5)
	# len < NBYTES on entry, so at most one byte remains here: the
	# 7th, at offset NBYTES-2 == 6
EXC(	lb	t0, NBYTES-2(src), l_exc)
	SUB	len, len, 1
	jr	ra
EXC(	 sb	t0, NBYTES-2(dst), s_exc_p1)
done:
	jr	ra
	 nop
	END(memcpy)

l_exc_copy_rewind16:
	/* Rewind src and dst by 16*NBYTES for l_exc_copy */
	SUB	src, src, 16*NBYTES
	SUB	dst, dst, 16*NBYTES
l_exc_copy:
	/*
	 * Copy bytes from src until faulting load address (or until a
	 * lb faults)
	 *
	 * When reached by a faulting LDFIRST/LDREST, THREAD_BUADDR($28)
	 * may be more than a byte beyond the last address.
	 * Hence, the lb below may get an exception.
	 *
	 * Assumes src < THREAD_BUADDR($28)
	 */
	LOAD	t0, TI_TASK($28)
	LOAD	t0, THREAD_BUADDR(t0)
1:
EXC(	lb	t1, 0(src),	l_exc)
	ADD	src, src, 1
	sb	t1, 0(dst)	# can't fault -- we're copy_from_user
	bne	src, t0, 1b
	 ADD	dst, dst, 1
l_exc:
	LOAD	t0, TI_TASK($28)
	LOAD	t0, THREAD_BUADDR(t0)	# t0 is just past last good address
	SUB	len, AT, t0		# len = number of uncopied bytes
	bnez	t7, 2f		/* Skip the zeroing out part if inatomic */
	/*
	 * Here's where we rely on src and dst being incremented in tandem;
	 * see (3) above.
	 * dst += (fault addr - src) to put dst at the first byte to clear.
	 */
	ADD	dst, t0			# dst += fault addr
	SUB	dst, src		# dst -= src: first byte to clear
	/*
	 * Clear len bytes starting at dst.  Can't call __bzero because it
	 * might modify len.  An inefficient loop for these rare times...
	 */
	beqz	len, done
	 SUB	src, len, 1
1:	sb	zero, 0(dst)
	ADD	dst, dst, 1
	bnez	src, 1b
	 SUB	src, src, 1
2:	jr	ra
	 nop

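/*
 * Store-fault handlers.  len has already been decremented by the whole
 * block size when a store faults, so s_exc_pNu adds back N*NBYTES to
 * leave len as an upper bound on the number of uncopied bytes.
 */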
#define SEXC(n)				\
s_exc_p ## n ## u:			\
	jr	ra;			\
	 ADD	len, len, n*NBYTES

SEXC(16)
SEXC(15)
SEXC(14)
SEXC(13)
SEXC(12)
SEXC(11)
SEXC(10)
SEXC(9)
SEXC(8)
SEXC(7)
SEXC(6)
SEXC(5)
SEXC(4)
SEXC(3)
SEXC(2)
SEXC(1)

s_exc_p1:
	jr	ra
	 ADD	len, len, 1
s_exc:
	jr	ra
	 nop

	.align	5
LEAF(memmove)
EXPORT_SYMBOL(memmove)
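	/*
	 * Overlap test: t0 = (src < dst + len), t1 = (dst < src + len).
	 * The regions overlap iff both are true; if not, a plain forward
	 * __memcpy is safe.
	 */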
	ADD	t0, a0, a2
	ADD	t1, a1, a2
	sltu	t0, a1, t0			# dst + len <= src -> memcpy
	sltu	t1, a0, t1			# dst >= src + len -> memcpy
	and	t0, t1
	beqz	t0, __memcpy
	 move	v0, a0				/* return value */
	beqz	a2, r_out
	END(memmove)

	/* fall through to __rmemcpy */
LEAF(__rmemcpy)					/* a0=dst a1=src a2=len */
	 sltu	t0, a1, a0
	beqz	t0, r_end_bytes_up		# src >= dst
	 nop
	ADD	a0, a2				# dst = dst + len
	ADD	a1, a2				# src = src + len

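	#
	# Copy backwards, one byte per iteration, starting at the last
	# byte of each buffer.
	#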
r_end_bytes:
	lb	t0, -1(a1)
	SUB	a2, a2, 0x1
	sb	t0, -1(a0)
	SUB	a1, a1, 0x1
	bnez	a2, r_end_bytes
	 SUB	a0, a0, 0x1

r_out:
	jr	ra
	 move	a2, zero

r_end_bytes_up:
	lb	t0, (a1)
	SUB	a2, a2, 0x1
	sb	t0, (a0)
	ADD	a1, a1, 0x1
	bnez	a2, r_end_bytes_up
	 ADD	a0, a0, 0x1

	jr	ra
	 move	a2, zero
	END(__rmemcpy)