xref: /openbmc/linux/arch/loongarch/lib/memset.S (revision 3db55767)
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
5
6#include <asm/alternative-asm.h>
7#include <asm/asm.h>
8#include <asm/asmmacro.h>
9#include <asm/cpu.h>
10#include <asm/export.h>
11#include <asm/regdef.h>
12
/*
 * fill_to_64 - replicate the low byte of \r0 into all eight bytes.
 *
 * memset receives the fill value as an int whose low byte is the
 * pattern; three insert operations double the replicated width each
 * time (8 -> 16 -> 32 -> 64 bits) so a single st.d writes 8 pattern
 * bytes at once.
 */
.macro fill_to_64 r0
	bstrins.d \r0, \r0, 15, 8	/* byte 0 -> byte 1   */
	bstrins.d \r0, \r0, 31, 16	/* bytes 0-1 -> 2-3   */
	bstrins.d \r0, \r0, 63, 32	/* bytes 0-3 -> 4-7   */
.endm
18
/*
 * void *memset(void *s, int c, size_t n)
 *
 * Runtime-patched dispatcher: ALTERNATIVE emits the branch to the
 * byte-at-a-time generic routine, and the kernel patches it into a
 * branch to the unaligned-capable fast routine on CPUs that support
 * hardware unaligned access (CPU_FEATURE_UAL).  Both targets take the
 * same a0/a1/a2 arguments and return s in a0.
 */
SYM_FUNC_START(memset)
	/*
	 * Some CPUs support hardware unaligned access
	 */
	ALTERNATIVE	"b __memset_generic", \
			"b __memset_fast", CPU_FEATURE_UAL
SYM_FUNC_END(memset)
_ASM_NOKPROBE(memset)

EXPORT_SYMBOL(memset)
29
30/*
31 * void *__memset_generic(void *s, int c, size_t n)
32 *
33 * a0: s
34 * a1: c
35 * a2: n
36 */
/*
 * void *__memset_generic(void *s, int c, size_t n)
 *
 * Portable byte-at-a-time fill; used when hardware unaligned access
 * is not available.
 *
 * a0: s
 * a1: c
 * a2: n
 */
SYM_FUNC_START(__memset_generic)
	move	a3, a0			/* preserve s for the return value */
	beqz	a2, 2f			/* n == 0: nothing to store */

1:	st.b	a1, a0, 0		/* *p++ = (u8)c */
	addi.d	a0, a0, 1
	addi.d	a2, a2, -1		/* --n */
	bgt	a2, zero, 1b

2:	move	a0, a3			/* return s */
	jr	ra
SYM_FUNC_END(__memset_generic)
_ASM_NOKPROBE(__memset_generic)
50
51/*
52 * void *__memset_fast(void *s, int c, size_t n)
53 *
54 * a0: s
55 * a1: c
56 * a2: n
57 */
/*
 * void *__memset_fast(void *s, int c, size_t n)
 *
 * Fill using 8-byte stores, relying on hardware unaligned access
 * (CPU_FEATURE_UAL).  Large fills do an unaligned head store, an
 * aligned 64/32/16/8-byte store sequence, then an unaligned tail
 * store ending exactly at s + n; head/tail stores may overlap the
 * aligned body, which is harmless since every store writes the same
 * replicated pattern.  Fills of 0..8 bytes dispatch through a jump
 * table of 16-byte entries.
 *
 * a0: s  (left untouched so it doubles as the return value)
 * a1: c  (replicated to 64 bits below)
 * a2: n  (becomes the end pointer s + n on the large path)
 */
SYM_FUNC_START(__memset_fast)
	/* fill a1 to 64 bits */
	fill_to_64 a1

	sltui	t0, a2, 9		/* n < 9: use the jump table */
	bnez	t0, .Lsmall

	add.d	a2, a0, a2		/* a2 = end pointer s + n */
	st.d	a1, a0, 0		/* unaligned head store, bytes [s, s+8) */

	/* align up address */
	addi.d	a3, a0, 8
	bstrins.d	a3, zero, 2, 0	/* a3 = first 8-aligned addr past the head */

	addi.d	a4, a2, -64
	bgeu	a3, a4, .Llt64		/* fewer than 64 aligned bytes left */

	/* set 64 bytes at a time */
.Lloop64:
	st.d	a1, a3, 0
	st.d	a1, a3, 8
	st.d	a1, a3, 16
	st.d	a1, a3, 24
	st.d	a1, a3, 32
	st.d	a1, a3, 40
	st.d	a1, a3, 48
	st.d	a1, a3, 56
	addi.d	a3, a3, 64
	bltu	a3, a4, .Lloop64

	/* set the remaining bytes */
.Llt64:
	addi.d	a4, a2, -32
	bgeu	a3, a4, .Llt32
	st.d	a1, a3, 0
	st.d	a1, a3, 8
	st.d	a1, a3, 16
	st.d	a1, a3, 24
	addi.d	a3, a3, 32

.Llt32:
	addi.d	a4, a2, -16
	bgeu	a3, a4, .Llt16
	st.d	a1, a3, 0
	st.d	a1, a3, 8
	addi.d	a3, a3, 16

.Llt16:
	addi.d	a4, a2, -8
	bgeu	a3, a4, .Llt8
	st.d	a1, a3, 0

.Llt8:
	st.d	a1, a2, -8		/* tail store ends exactly at s + n */

	/* return */
	jr	ra			/* a0 still holds s */

	.align	4
.Lsmall:
	/*
	 * Computed goto: entries below are 16 bytes apart (.align 4),
	 * so jump to .Lsmall + 16 + n * 16.  pcaddi t0, 4 yields
	 * PC + 16, i.e. the n == 0 entry.
	 */
	pcaddi	t0, 4
	slli.d	a2, a2, 4		/* n * 16 */
	add.d	t0, t0, a2
	jr	t0

	.align	4
0:	jr	ra			/* n == 0 */

	.align	4
1:	st.b	a1, a0, 0		/* n == 1 */
	jr	ra

	.align	4
2:	st.h	a1, a0, 0		/* n == 2 */
	jr	ra

	.align	4
3:	st.h	a1, a0, 0		/* n == 3 */
	st.b	a1, a0, 2
	jr	ra

	.align	4
4:	st.w	a1, a0, 0		/* n == 4 */
	jr	ra

	.align	4
5:	st.w	a1, a0, 0		/* n == 5 */
	st.b	a1, a0, 4
	jr	ra

	.align	4
6:	st.w	a1, a0, 0		/* n == 6 */
	st.h	a1, a0, 4
	jr	ra

	.align	4
7:	st.w	a1, a0, 0		/* n == 7: second store overlaps byte 3 */
	st.w	a1, a0, 3
	jr	ra

	.align	4
8:	st.d	a1, a0, 0		/* n == 8 */
	jr	ra
SYM_FUNC_END(__memset_fast)
_ASM_NOKPROBE(__memset_fast)
163