xref: /openbmc/linux/arch/xtensa/lib/strncpy_user.S (revision 338d9150)
1249ac17eSChris Zankel/*
2249ac17eSChris Zankel *  arch/xtensa/lib/strncpy_user.S
3249ac17eSChris Zankel *
4249ac17eSChris Zankel *  This file is subject to the terms and conditions of the GNU General
5249ac17eSChris Zankel *  Public License.  See the file "COPYING" in the main directory of
6249ac17eSChris Zankel *  this archive for more details.
7249ac17eSChris Zankel *
8249ac17eSChris Zankel *  Returns: -EFAULT if an exception occurs before the terminator is
9249ac17eSChris Zankel *  copied, len if the buffer fills with no terminator, else strlen.
10249ac17eSChris Zankel *
11249ac17eSChris Zankel *  Copyright (C) 2002 Tensilica Inc.
12249ac17eSChris Zankel */
13249ac17eSChris Zankel
14249ac17eSChris Zankel#include <linux/errno.h>
155cf97ebdSMax Filippov#include <linux/linkage.h>
160013acebSMax Filippov#include <asm/asmmacro.h>
178f8d5745SMax Filippov#include <asm/core.h>
18249ac17eSChris Zankel
19249ac17eSChris Zankel/*
20249ac17eSChris Zankel * char *__strncpy_user(char *dst, const char *src, size_t len)
21249ac17eSChris Zankel */
22a0bb46baSChris Zankel
23a0bb46baSChris Zankel#ifdef __XTENSA_EB__
24a0bb46baSChris Zankel# define MASK0 0xff000000
25a0bb46baSChris Zankel# define MASK1 0x00ff0000
26a0bb46baSChris Zankel# define MASK2 0x0000ff00
27a0bb46baSChris Zankel# define MASK3 0x000000ff
28a0bb46baSChris Zankel#else
29a0bb46baSChris Zankel# define MASK0 0x000000ff
30a0bb46baSChris Zankel# define MASK1 0x0000ff00
31a0bb46baSChris Zankel# define MASK2 0x00ff0000
32a0bb46baSChris Zankel# define MASK3 0xff000000
33a0bb46baSChris Zankel#endif
34249ac17eSChris Zankel
35249ac17eSChris Zankel# Register use
36249ac17eSChris Zankel#   a0/ return address
37249ac17eSChris Zankel#   a1/ stack pointer
38249ac17eSChris Zankel#   a2/ return value
39249ac17eSChris Zankel#   a3/ src
40249ac17eSChris Zankel#   a4/ len
41249ac17eSChris Zankel#   a5/ mask0
42249ac17eSChris Zankel#   a6/ mask1
43249ac17eSChris Zankel#   a7/ mask2
44249ac17eSChris Zankel#   a8/ mask3
45249ac17eSChris Zankel#   a9/ tmp
46249ac17eSChris Zankel#   a10/ tmp (loop count / end-of-copy bound)
47249ac17eSChris Zankel#   a11/ dst
48249ac17eSChris Zankel
49a0bb46baSChris Zankel.text
505cf97ebdSMax FilippovENTRY(__strncpy_user)
515cf97ebdSMax Filippov
	/*
	 * Copy at most a4 (len) bytes from src (a3) to dst (a2), stopping
	 * after a terminating zero byte.  Once src is word-aligned the copy
	 * runs a word at a time; the MASKn registers (a5..a8) each select one
	 * byte lane, so "bnone value, maskN, label" branches when that byte
	 * of the loaded word is zero.  Every access that may fault is wrapped
	 * in EX(): loads fix up at local label 11, stores at label 10
	 * (see the .fixup section at the end of this file).
	 */
52d6d5f19eSMax Filippov	abi_entry_default
53249ac17eSChris Zankel	# a2/ dst, a3/ src, a4/ len
54249ac17eSChris Zankel	mov	a11, a2		# leave dst in return value register
55249ac17eSChris Zankel	beqz	a4, .Lret	# if len is zero
56a0bb46baSChris Zankel	movi	a5, MASK0	# mask for byte 0
57a0bb46baSChris Zankel	movi	a6, MASK1	# mask for byte 1
58a0bb46baSChris Zankel	movi	a7, MASK2	# mask for byte 2
59a0bb46baSChris Zankel	movi	a8, MASK3	# mask for byte 3
60249ac17eSChris Zankel	bbsi.l	a3, 0, .Lsrc1mod2 # if only  8-bit aligned
61249ac17eSChris Zankel	bbsi.l	a3, 1, .Lsrc2mod4 # if only 16-bit aligned
62249ac17eSChris Zankel.Lsrcaligned:	# return here when src is word-aligned
63d191323bSMax Filippov	srli	a10, a4, 2	# number of loop iterations with 4B per loop
64249ac17eSChris Zankel	movi	a9, 3
65249ac17eSChris Zankel	bnone	a11, a9, .Laligned	# take fast path if dst is word-aligned too
66249ac17eSChris Zankel	j	.Ldstunaligned
67249ac17eSChris Zankel
68249ac17eSChris Zankel.Lsrc1mod2:	# src address is odd
690013acebSMax FilippovEX(11f)	l8ui	a9, a3, 0		# get byte 0
70249ac17eSChris Zankel	addi	a3, a3, 1		# advance src pointer
710013acebSMax FilippovEX(10f)	s8i	a9, a11, 0		# store byte 0
72249ac17eSChris Zankel	beqz	a9, .Lret		# if byte 0 is zero
73249ac17eSChris Zankel	addi	a11, a11, 1		# advance dst pointer
74249ac17eSChris Zankel	addi	a4, a4, -1		# decrement len
75249ac17eSChris Zankel	beqz	a4, .Lret		# if len is zero
76249ac17eSChris Zankel	bbci.l	a3, 1, .Lsrcaligned	# if src is now word-aligned
77249ac17eSChris Zankel
78249ac17eSChris Zankel.Lsrc2mod4:	# src address is 2 mod 4
790013acebSMax FilippovEX(11f)	l8ui	a9, a3, 0		# get byte 0
80249ac17eSChris Zankel	/* 1-cycle interlock */
810013acebSMax FilippovEX(10f)	s8i	a9, a11, 0		# store byte 0
82249ac17eSChris Zankel	beqz	a9, .Lret		# if byte 0 is zero
83249ac17eSChris Zankel	addi	a11, a11, 1		# advance dst pointer
84249ac17eSChris Zankel	addi	a4, a4, -1		# decrement len
85249ac17eSChris Zankel	beqz	a4, .Lret		# if len is zero
860013acebSMax FilippovEX(11f)	l8ui	a9, a3, 1		# get byte 1
87249ac17eSChris Zankel	addi	a3, a3, 2		# advance src pointer
880013acebSMax FilippovEX(10f)	s8i	a9, a11, 0		# store byte 1
89249ac17eSChris Zankel	beqz	a9, .Lret		# if byte 1 is zero
90249ac17eSChris Zankel	addi	a11, a11, 1		# advance dst pointer
91249ac17eSChris Zankel	addi	a4, a4, -1		# decrement len
92249ac17eSChris Zankel	bnez	a4, .Lsrcaligned	# if len is nonzero
93249ac17eSChris Zankel.Lret:
94249ac17eSChris Zankel	sub	a2, a11, a2		# compute strlen
95d6d5f19eSMax Filippov	abi_ret_default
96249ac17eSChris Zankel
97249ac17eSChris Zankel/*
98249ac17eSChris Zankel * dst is word-aligned, src is word-aligned
99249ac17eSChris Zankel */
100249ac17eSChris Zankel	.align	4		# 1 mod 4 alignment for LOOPNEZ
101249ac17eSChris Zankel	.byte	0		# (0 mod 4 alignment for LBEG)
102249ac17eSChris Zankel.Laligned:
103249ac17eSChris Zankel#if XCHAL_HAVE_LOOPS
104d191323bSMax Filippov	loopnez	a10, .Loop1done
105249ac17eSChris Zankel#else
106d191323bSMax Filippov	beqz	a10, .Loop1done
107d191323bSMax Filippov	slli	a10, a10, 2
108d191323bSMax Filippov	add	a10, a10, a11	# a10 = end of last 4B chunk
109249ac17eSChris Zankel#endif
110249ac17eSChris Zankel.Loop1:
1110013acebSMax FilippovEX(11f)	l32i	a9, a3, 0		# get word from src
112249ac17eSChris Zankel	addi	a3, a3, 4		# advance src pointer
113249ac17eSChris Zankel	bnone	a9, a5, .Lz0		# if byte 0 is zero
114249ac17eSChris Zankel	bnone	a9, a6, .Lz1		# if byte 1 is zero
115249ac17eSChris Zankel	bnone	a9, a7, .Lz2		# if byte 2 is zero
1160013acebSMax FilippovEX(10f)	s32i	a9, a11, 0		# store word to dst
117249ac17eSChris Zankel	bnone	a9, a8, .Lz3		# if byte 3 is zero
118249ac17eSChris Zankel	addi	a11, a11, 4		# advance dst pointer
119249ac17eSChris Zankel#if !XCHAL_HAVE_LOOPS
120d191323bSMax Filippov	blt	a11, a10, .Loop1
121249ac17eSChris Zankel#endif
122249ac17eSChris Zankel
123249ac17eSChris Zankel.Loop1done:
	# At most 3 bytes remain (len mod 4, still in bits 0..1 of a4).
124249ac17eSChris Zankel	bbci.l	a4, 1, .L100
125249ac17eSChris Zankel	# copy 2 bytes
1260013acebSMax FilippovEX(11f)	l16ui	a9, a3, 0
127249ac17eSChris Zankel	addi	a3, a3, 2		# advance src pointer
128249ac17eSChris Zankel#ifdef __XTENSA_EB__
129249ac17eSChris Zankel	bnone	a9, a7, .Lz0		# if byte 2 is zero
130249ac17eSChris Zankel	bnone	a9, a8, .Lz1		# if byte 3 is zero
131249ac17eSChris Zankel#else
132249ac17eSChris Zankel	bnone	a9, a5, .Lz0		# if byte 0 is zero
133249ac17eSChris Zankel	bnone	a9, a6, .Lz1		# if byte 1 is zero
134249ac17eSChris Zankel#endif
1350013acebSMax FilippovEX(10f)	s16i	a9, a11, 0
136249ac17eSChris Zankel	addi	a11, a11, 2		# advance dst pointer
137249ac17eSChris Zankel.L100:
138249ac17eSChris Zankel	bbci.l	a4, 0, .Lret
1390013acebSMax FilippovEX(11f)	l8ui	a9, a3, 0
140249ac17eSChris Zankel	/* slot */
1410013acebSMax FilippovEX(10f)	s8i	a9, a11, 0
142249ac17eSChris Zankel	beqz	a9, .Lret		# if byte is zero
143249ac17eSChris Zankel	addi	a11, a11, 1-3		# advance dst ptr 1, but also cancel
144249ac17eSChris Zankel					# the effect of adding 3 in .Lz3 code
145249ac17eSChris Zankel	/* fall thru to .Lz3 and "retw" */
146249ac17eSChris Zankel
147249ac17eSChris Zankel.Lz3:	# byte 3 is zero
	# From .Loop1 the whole word (terminator included) is already stored;
	# also reached by the fall-through above with a11 pre-biased by -2.
148249ac17eSChris Zankel	addi	a11, a11, 3		# advance dst pointer
149249ac17eSChris Zankel	sub	a2, a11, a2		# compute strlen
150d6d5f19eSMax Filippov	abi_ret_default
151249ac17eSChris Zankel.Lz0:	# byte 0 is zero
152249ac17eSChris Zankel#ifdef __XTENSA_EB__
153249ac17eSChris Zankel	movi	a9, 0		# EB: low byte of a9 is byte 3, not the zero byte
154249ac17eSChris Zankel#endif /* __XTENSA_EB__ */
1550013acebSMax FilippovEX(10f)	s8i	a9, a11, 0	# store the terminating zero
156249ac17eSChris Zankel	sub	a2, a11, a2		# compute strlen
157d6d5f19eSMax Filippov	abi_ret_default
158249ac17eSChris Zankel.Lz1:	# byte 1 is zero
159249ac17eSChris Zankel#ifdef __XTENSA_EB__
160249ac17eSChris Zankel	extui   a9, a9, 16, 16	# EB: move bytes 0..1 into the low halfword
161249ac17eSChris Zankel#endif /* __XTENSA_EB__ */
1620013acebSMax FilippovEX(10f)	s16i	a9, a11, 0	# store byte 0 and the terminating zero
163249ac17eSChris Zankel	addi	a11, a11, 1		# advance dst pointer
164249ac17eSChris Zankel	sub	a2, a11, a2		# compute strlen
165d6d5f19eSMax Filippov	abi_ret_default
166249ac17eSChris Zankel.Lz2:	# byte 2 is zero
167249ac17eSChris Zankel#ifdef __XTENSA_EB__
168249ac17eSChris Zankel	extui   a9, a9, 16, 16	# EB: move bytes 0..1 into the low halfword
169249ac17eSChris Zankel#endif /* __XTENSA_EB__ */
1700013acebSMax FilippovEX(10f)	s16i	a9, a11, 0	# store bytes 0..1
171249ac17eSChris Zankel	movi	a9, 0
1720013acebSMax FilippovEX(10f)	s8i	a9, a11, 2	# store the terminating zero
173249ac17eSChris Zankel	addi	a11, a11, 2		# advance dst pointer
174249ac17eSChris Zankel	sub	a2, a11, a2		# compute strlen
175d6d5f19eSMax Filippov	abi_ret_default
176249ac17eSChris Zankel
177249ac17eSChris Zankel	.align	4		# 1 mod 4 alignment for LOOPNEZ
178249ac17eSChris Zankel	.byte	0		# (0 mod 4 alignment for LBEG)
179249ac17eSChris Zankel.Ldstunaligned:
180249ac17eSChris Zankel/*
181249ac17eSChris Zankel * for now just use byte copy loop
182249ac17eSChris Zankel */
183249ac17eSChris Zankel#if XCHAL_HAVE_LOOPS
184249ac17eSChris Zankel	loopnez	a4, .Lunalignedend
185249ac17eSChris Zankel#else
186249ac17eSChris Zankel	beqz	a4, .Lunalignedend
187d191323bSMax Filippov	add	a10, a11, a4		# a10 = ending address
188249ac17eSChris Zankel#endif /* XCHAL_HAVE_LOOPS */
189249ac17eSChris Zankel.Lnextbyte:
1900013acebSMax FilippovEX(11f)	l8ui	a9, a3, 0		# load next byte (faults -> 11)
191249ac17eSChris Zankel	addi	a3, a3, 1
1920013acebSMax FilippovEX(10f)	s8i	a9, a11, 0		# store it (faults -> 10)
193249ac17eSChris Zankel	beqz	a9, .Lunalignedend	# stop after the terminator
194249ac17eSChris Zankel	addi	a11, a11, 1
195249ac17eSChris Zankel#if !XCHAL_HAVE_LOOPS
196d191323bSMax Filippov	blt	a11, a10, .Lnextbyte
197249ac17eSChris Zankel#endif
198249ac17eSChris Zankel
199249ac17eSChris Zankel.Lunalignedend:
200249ac17eSChris Zankel	sub	a2, a11, a2		# compute strlen
201d6d5f19eSMax Filippov	abi_ret_default
202249ac17eSChris Zankel
2035cf97ebdSMax FilippovENDPROC(__strncpy_user)
204*338d9150SMax FilippovEXPORT_SYMBOL(__strncpy_user)
205249ac17eSChris Zankel
206249ac17eSChris Zankel	.section .fixup, "ax"
207249ac17eSChris Zankel	.align	4
208249ac17eSChris Zankel
209249ac17eSChris Zankel	/* For now, just return -EFAULT.  Future implementations might
210249ac17eSChris Zankel	 * like to clear remaining kernel space, like the fixup
211249ac17eSChris Zankel	 * implementation in memset().  Thus, we differentiate between
212249ac17eSChris Zankel	 * load/store fixups. */
213249ac17eSChris Zankel
	# 10: is the fixup target for faulting stores (EX(10f) sites),
	# 11: for faulting loads (EX(11f) sites).  Both currently share the
	# same action: return -EFAULT in a2.
2140013acebSMax Filippov10:
2150013acebSMax Filippov11:
216249ac17eSChris Zankel	movi	a2, -EFAULT
217d6d5f19eSMax Filippov	abi_ret_default
218