xref: /openbmc/linux/arch/sparc/net/bpf_jit_asm_32.S (revision 4da722ca19f30f7db250db808d1ab1703607a932)
#include <asm/ptrace.h>

#include "bpf_jit_32.h"

/* Stack frame size: the minimum sparc32 frame (register window
 * save area + hidden parameter + outgoing args) rounded for this file.
 */
#define SAVE_SZ		96
/* Offset (from %fp of the saved frame / %sp after restore) of the
 * on-stack scratch buffer that skb_copy_bits() fills in the slow paths.
 */
#define SCRATCH_OFF	72
/* Branch when the helper returned NULL: on sparc32 a pointer fits in
 * one register, so a plain "be" after "cmp ptr, 0" is sufficient.
 */
#define BE_PTR(label)	be label
/* No-op on sparc32: offsets are already full 32-bit register values,
 * so no sign extension is needed before the C helper call.
 */
#define SIGN_EXTEND(reg)

#define SKF_MAX_NEG_OFF	(-0x200000) /* SKF_LL_OFF from filter.h */
	.text
	/* bpf_jit_load_word: load a 32-bit big-endian word from the skb
	 * at byte offset r_OFF into the BPF accumulator r_A.
	 * Expects r_SKB_DATA = skb->data and r_HEADLEN = skb_headlen(skb)
	 * to be live (set up by the JIT prologue).  Negative offsets are
	 * routed to the negative-offset slow path; failures end up in
	 * bpf_error, which makes the whole program return 0.
	 */
	.globl	bpf_jit_load_word
bpf_jit_load_word:
	cmp	r_OFF, 0
	bl	bpf_slow_path_word_neg	/* signed: offset < 0 is special */
	 nop				/* branch delay slot */
	.globl	bpf_jit_load_word_positive_offset
bpf_jit_load_word_positive_offset:
	sub	r_HEADLEN, r_OFF, r_TMP	/* bytes available in linear head */
	cmp	r_TMP, 3
	ble	bpf_slow_path_word	/* < 4 bytes left: use skb_copy_bits */
	 add	r_SKB_DATA, r_OFF, r_TMP /* delay slot: r_TMP = load address */
	andcc	r_TMP, 3, %g0		/* address 4-byte aligned? */
	bne	load_word_unaligned
	 nop
	retl
	 ld	[r_TMP], r_A		/* aligned load; sparc is big-endian */
load_word_unaligned:
	/* Assemble the word one byte at a time, most-significant first.
	 * r_OFF is reused as the accumulator (its value is dead by now).
	 */
	ldub	[r_TMP + 0x0], r_OFF
	ldub	[r_TMP + 0x1], r_TMP2
	sll	r_OFF, 8, r_OFF
	or	r_OFF, r_TMP2, r_OFF
	ldub	[r_TMP + 0x2], r_TMP2
	sll	r_OFF, 8, r_OFF
	or	r_OFF, r_TMP2, r_OFF
	ldub	[r_TMP + 0x3], r_TMP2
	sll	r_OFF, 8, r_OFF
	retl
	 or	r_OFF, r_TMP2, r_A	/* delay slot: merge final byte */
41
	/* bpf_jit_load_half: load a 16-bit big-endian halfword from the
	 * skb at byte offset r_OFF into r_A.  Same register contract as
	 * bpf_jit_load_word above.
	 */
	.globl	bpf_jit_load_half
bpf_jit_load_half:
	cmp	r_OFF, 0
	bl	bpf_slow_path_half_neg	/* signed: offset < 0 is special */
	 nop				/* branch delay slot */
	.globl	bpf_jit_load_half_positive_offset
bpf_jit_load_half_positive_offset:
	sub	r_HEADLEN, r_OFF, r_TMP	/* bytes available in linear head */
	cmp	r_TMP, 1
	ble	bpf_slow_path_half	/* < 2 bytes left: use skb_copy_bits */
	 add	r_SKB_DATA, r_OFF, r_TMP /* delay slot: r_TMP = load address */
	andcc	r_TMP, 1, %g0		/* address 2-byte aligned? */
	bne	load_half_unaligned
	 nop
	retl
	 lduh	[r_TMP], r_A		/* aligned halfword load */
load_half_unaligned:
	/* Build the halfword from two byte loads, high byte first. */
	ldub	[r_TMP + 0x0], r_OFF
	ldub	[r_TMP + 0x1], r_TMP2
	sll	r_OFF, 8, r_OFF
	retl
	 or	r_OFF, r_TMP2, r_A	/* delay slot: merge low byte */
64
	/* bpf_jit_load_byte: load one byte from the skb at offset r_OFF
	 * into r_A.  A single byte can never be misaligned, so the only
	 * fast-path check is that the offset lies within the linear head.
	 */
	.globl	bpf_jit_load_byte
bpf_jit_load_byte:
	cmp	r_OFF, 0
	bl	bpf_slow_path_byte_neg	/* signed: offset < 0 is special */
	 nop				/* branch delay slot */
	.globl	bpf_jit_load_byte_positive_offset
bpf_jit_load_byte_positive_offset:
	cmp	r_OFF, r_HEADLEN
	bge	bpf_slow_path_byte	/* past the linear head: slow path */
	 nop
	retl
	 ldub	[r_SKB_DATA + r_OFF], r_A /* delay slot: direct byte load */
77
	/* bpf_jit_load_byte_msh: classic BPF_LDX|BPF_B|BPF_MSH, i.e.
	 * X = 4 * (P[r_OFF] & 0xf) -- conventionally used to extract an
	 * IP header length.  Sets r_X only; r_A is left untouched.
	 */
	.globl	bpf_jit_load_byte_msh
bpf_jit_load_byte_msh:
	cmp	r_OFF, 0
	bl	bpf_slow_path_byte_msh_neg /* signed: offset < 0 is special */
	 nop				/* branch delay slot */
	.globl	bpf_jit_load_byte_msh_positive_offset
bpf_jit_load_byte_msh_positive_offset:
	cmp	r_OFF, r_HEADLEN
	bge	bpf_slow_path_byte_msh	/* past the linear head: slow path */
	 nop
	ldub	[r_SKB_DATA + r_OFF], r_OFF /* byte overwrites the dead offset */
	and	r_OFF, 0xf, r_OFF	/* keep low nibble */
	retl
	 sll	r_OFF, 2, r_X		/* delay slot: X = 4 * nibble */
92
/* Common slow path for in-range (positive) offsets: open a full
 * register window and call
 *   skb_copy_bits(skb, r_OFF, scratch, LEN)
 * copying LEN bytes into the stack scratch slot.  After "save", the
 * caller's %o0 (where the JIT keeps the skb) is visible as %i0.
 * "cmp %o0, 0" is done before "restore"; restore does not modify the
 * integer condition codes, so the invoking stub can still branch on
 * the helper's return value afterwards.  The scratch data written at
 * %fp + SCRATCH_OFF is read back as %sp + SCRATCH_OFF once the window
 * is restored (same address, different register name).
 */
#define bpf_slow_path_common(LEN)	\
	save	%sp, -SAVE_SZ, %sp;	\
	mov	%i0, %o0;		\
	mov	r_OFF, %o1;		\
	add	%fp, SCRATCH_OFF, %o2;	\
	call	skb_copy_bits;		\
	 mov	(LEN), %o3;		\
	cmp	%o0, 0;			\
	restore;
102
/* Copy 4 bytes via skb_copy_bits(), then load the result into r_A.
 * The delay-slot load executes even when branching to bpf_error;
 * that is harmless since bpf_error discards r_A and returns 0.
 */
bpf_slow_path_word:
	bpf_slow_path_common(4)
	bl	bpf_error		/* skb_copy_bits() returned < 0 */
	 ld	[%sp + SCRATCH_OFF], r_A /* delay slot: fetch copied word */
	retl
	 nop
/* Copy 2 bytes via skb_copy_bits(), then load the halfword into r_A.
 * As above, the delay-slot load is harmless on the error path.
 */
bpf_slow_path_half:
	bpf_slow_path_common(2)
	bl	bpf_error		/* skb_copy_bits() returned < 0 */
	 lduh	[%sp + SCRATCH_OFF], r_A /* delay slot: fetch copied half */
	retl
	 nop
/* Copy 1 byte via skb_copy_bits(), then load it into r_A. */
bpf_slow_path_byte:
	bpf_slow_path_common(1)
	bl	bpf_error		/* skb_copy_bits() returned < 0 */
	 ldub	[%sp + SCRATCH_OFF], r_A /* delay slot: fetch copied byte */
	retl
	 nop
/* Slow path for BPF_LDX|BPF_B|BPF_MSH with a positive offset: copy
 * one byte via skb_copy_bits(), then compute X = 4 * (byte & 0xf).
 * The byte must be loaded into the scratch register r_OFF -- not
 * r_A -- both because the MSH instruction must leave the accumulator
 * untouched and because the nibble math below operates on r_OFF.
 * This matches the fast path and the negative-offset path, which
 * also stage the byte in r_OFF.  (The previous code loaded the byte
 * into r_A, clobbering the accumulator and leaving r_OFF holding the
 * packet offset, so r_X was computed from the offset instead of the
 * packet byte.)
 */
bpf_slow_path_byte_msh:
	bpf_slow_path_common(1)
	bl	bpf_error		/* skb_copy_bits() returned < 0 */
	 ldub	[%sp + SCRATCH_OFF], r_OFF /* delay slot: fetch copied byte */
	and	r_OFF, 0xf, r_OFF	/* keep low nibble */
	retl
	 sll	r_OFF, 2, r_X		/* delay slot: X = 4 * nibble */
128
/* Common slow path for negative offsets: call
 *   bpf_internal_load_pointer_neg_helper(skb, r_OFF, LEN)
 * which resolves special (SKF_*_OFF) areas and returns a pointer to
 * LEN readable bytes, or NULL on failure.  The pointer is copied to
 * r_TMP (which survives the window restore) and tested; BE_PTR jumps
 * to bpf_error on NULL with "restore" in its delay slot, so the
 * window is restored whether or not the branch is taken.
 */
#define bpf_negative_common(LEN)			\
	save	%sp, -SAVE_SZ, %sp;			\
	mov	%i0, %o0;				\
	mov	r_OFF, %o1;				\
	SIGN_EXTEND(%o1);				\
	call	bpf_internal_load_pointer_neg_helper;	\
	 mov	(LEN), %o2;				\
	mov	%o0, r_TMP;				\
	cmp	%o0, 0;					\
	BE_PTR(bpf_error);				\
	 restore;
140
/* Negative-offset word load.  Offsets below SKF_MAX_NEG_OFF are
 * invalid and fail immediately.  A single sethi suffices for the
 * bound because SKF_MAX_NEG_OFF (-0x200000) has zero low bits.
 */
bpf_slow_path_word_neg:
	sethi	%hi(SKF_MAX_NEG_OFF), r_TMP
	cmp	r_OFF, r_TMP
	bl	bpf_error		/* offset below the legal minimum */
	 nop
	.globl	bpf_jit_load_word_negative_offset
bpf_jit_load_word_negative_offset:
	bpf_negative_common(4)
	andcc	r_TMP, 3, %g0		/* returned pointer 4-byte aligned? */
	bne	load_word_unaligned	/* reuse byte-wise assembly above */
	 nop
	retl
	 ld	[r_TMP], r_A		/* delay slot: aligned word load */
154
/* Negative-offset halfword load; mirrors the word variant above. */
bpf_slow_path_half_neg:
	sethi	%hi(SKF_MAX_NEG_OFF), r_TMP
	cmp	r_OFF, r_TMP
	bl	bpf_error		/* offset below the legal minimum */
	 nop
	.globl	bpf_jit_load_half_negative_offset
bpf_jit_load_half_negative_offset:
	bpf_negative_common(2)
	andcc	r_TMP, 1, %g0		/* returned pointer 2-byte aligned? */
	bne	load_half_unaligned	/* reuse byte-wise assembly above */
	 nop
	retl
	 lduh	[r_TMP], r_A		/* delay slot: aligned half load */
168
/* Negative-offset byte load; no alignment concerns for one byte. */
bpf_slow_path_byte_neg:
	sethi	%hi(SKF_MAX_NEG_OFF), r_TMP
	cmp	r_OFF, r_TMP
	bl	bpf_error		/* offset below the legal minimum */
	 nop
	.globl	bpf_jit_load_byte_negative_offset
bpf_jit_load_byte_negative_offset:
	bpf_negative_common(1)
	retl
	 ldub	[r_TMP], r_A		/* delay slot: fetch the byte */
179
/* Negative-offset MSH byte load: X = 4 * (byte & 0xf), r_A untouched. */
bpf_slow_path_byte_msh_neg:
	sethi	%hi(SKF_MAX_NEG_OFF), r_TMP
	cmp	r_OFF, r_TMP
	bl	bpf_error		/* offset below the legal minimum */
	 nop
	.globl	bpf_jit_load_byte_msh_negative_offset
bpf_jit_load_byte_msh_negative_offset:
	bpf_negative_common(1)
	ldub	[r_TMP], r_OFF		/* byte overwrites the dead offset */
	and	r_OFF, 0xf, r_OFF	/* keep low nibble */
	retl
	 sll	r_OFF, 2, r_X		/* delay slot: X = 4 * nibble */
192
bpf_error:
	/* Make the JIT program return zero.  The JIT epilogue
	 * stores away the original %o7 into r_saved_O7.  The
	 * normal leaf function return is to use "retl" which
	 * would evaluate to "jmpl %o7 + 8, %g0" but we want to
	 * use the saved value thus the sequence you see here.
	 */
	jmpl	r_saved_O7 + 8, %g0
	 clr	%o0			/* delay slot: return value = 0 */
202