// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/spill_fill.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 4096);
} map_ringbuf SEC(".maps");

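/* The tests below exercise the verifier's spill/fill tracking: a
 * register-sized (u64) store of a pointer to the stack is recorded as a
 * spill, the slot remembers the pointer's type, and a matching u64 load
 * restores it. Narrower or misaligned accesses degrade or invalidate the
 * slot, which the failure tests check.
 */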
SEC("socket")
__description("check valid spill/fill")
__success __failure_unpriv __msg_unpriv("R0 leaks addr")
__retval(POINTER_VALUE)
__naked void check_valid_spill_fill(void)
{
	asm volatile ("					\
	/* spill R1(ctx) into stack */			\
	*(u64*)(r10 - 8) = r1;				\
	/* fill it back into R2 */			\
	r2 = *(u64*)(r10 - 8);				\
	/* should be able to access R0 = *(R2 + 8) */	\
	/* BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 8), */\
	r0 = r2;					\
	exit;						\
"	::: __clobber_all);
}

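/* A spilled PTR_TO_CTX keeps its type across the round trip through the
 * stack, so the filled register can still be used for ctx-rewritten
 * accesses such as skb->mark below.
 */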
SEC("socket")
__description("check valid spill/fill, skb mark")
__success __success_unpriv __retval(0)
__naked void valid_spill_fill_skb_mark(void)
{
	asm volatile ("					\
	r6 = r1;					\
	*(u64*)(r10 - 8) = r6;				\
	r0 = *(u64*)(r10 - 8);				\
	r0 = *(u32*)(r0 + %[__sk_buff_mark]);		\
	exit;						\
"	:
	: __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
	: __clobber_all);
}

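/* bpf_ringbuf_reserve() returns ringbuf memory or NULL; once NULL-checked,
 * the pointer behaves like PTR_TO_MEM and should survive a spill to the
 * stack and a fill into a different register.
 */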
SEC("socket")
__description("check valid spill/fill, ptr to mem")
__success __success_unpriv __retval(0)
__naked void spill_fill_ptr_to_mem(void)
{
	asm volatile ("					\
	/* reserve 8 bytes of ringbuf memory */		\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r1 = %[map_ringbuf] ll;				\
	r2 = 8;						\
	r3 = 0;						\
	call %[bpf_ringbuf_reserve];			\
	/* store a pointer to the reserved memory in R6 */\
	r6 = r0;					\
	/* check whether the reservation was successful */\
	if r0 == 0 goto l0_%=;				\
	/* spill R6(mem) into the stack */		\
	*(u64*)(r10 - 8) = r6;				\
	/* fill it back in R7 */			\
	r7 = *(u64*)(r10 - 8);				\
	/* should be able to access *(R7) = 0 */	\
	r1 = 0;						\
	*(u64*)(r7 + 0) = r1;				\
	/* submit the reserved ringbuf memory */	\
	r1 = r7;					\
	r2 = 0;						\
	call %[bpf_ringbuf_submit];			\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_ringbuf_reserve),
	  __imm(bpf_ringbuf_submit),
	  __imm_addr(map_ringbuf)
	: __clobber_all);
}

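/* Same setup as above, but the offset is added before the NULL check,
 * i.e. while r0 is still "ringbuf_mem_or_null". Arithmetic on a
 * maybe-NULL pointer is rejected outright, matching the expected
 * verifier message.
 */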
SEC("socket")
__description("check with invalid reg offset 0")
__failure __msg("R0 pointer arithmetic on ringbuf_mem_or_null prohibited")
__failure_unpriv
__naked void with_invalid_reg_offset_0(void)
{
	asm volatile ("					\
	/* reserve 8 bytes of ringbuf memory */		\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r1 = %[map_ringbuf] ll;				\
	r2 = 8;						\
	r3 = 0;						\
	call %[bpf_ringbuf_reserve];			\
	/* store a pointer to the reserved memory in R6 */\
	r6 = r0;					\
	/* add invalid offset to memory or NULL */	\
	r0 += 1;					\
	/* check whether the reservation was successful */\
	if r0 == 0 goto l0_%=;				\
	/* should not be able to access *(R6) = 0 */	\
	r1 = 0;						\
	*(u32*)(r6 + 0) = r1;				\
	/* submit the reserved ringbuf memory */	\
	r1 = r6;					\
	r2 = 0;						\
	call %[bpf_ringbuf_submit];			\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_ringbuf_reserve),
	  __imm(bpf_ringbuf_submit),
	  __imm_addr(map_ringbuf)
	: __clobber_all);
}

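/* Partially overwriting a spilled pointer corrupts the slot. Privileged
 * programs may still fill from it, but only as a SCALAR_VALUE that cannot
 * be dereferenced; unprivileged programs fail already at the overwriting
 * store ("attempt to corrupt spilled").
 */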
SEC("socket")
__description("check corrupted spill/fill")
__failure __msg("R0 invalid mem access 'scalar'")
__msg_unpriv("attempt to corrupt spilled")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void check_corrupted_spill_fill(void)
{
	asm volatile ("					\
	/* spill R1(ctx) into stack */			\
	*(u64*)(r10 - 8) = r1;				\
	/* mess with the R1 pointer on the stack */	\
	r0 = 0x23;					\
	*(u8*)(r10 - 7) = r0;				\
	/* filling back into R0 is fine for priv.	\
	 * R0 now becomes SCALAR_VALUE.			\
	 */						\
	r0 = *(u64*)(r10 - 8);				\
	/* Load from R0 should fail. */			\
	r0 = *(u64*)(r0 + 8);				\
	exit;						\
"	::: __clobber_all);
}

SEC("socket")
__description("check corrupted spill/fill, LSB")
__success __failure_unpriv __msg_unpriv("attempt to corrupt spilled")
__retval(POINTER_VALUE)
__naked void check_corrupted_spill_fill_lsb(void)
{
	asm volatile ("					\
	*(u64*)(r10 - 8) = r1;				\
	r0 = 0xcafe;					\
	*(u16*)(r10 - 8) = r0;				\
	r0 = *(u64*)(r10 - 8);				\
	exit;						\
"	::: __clobber_all);
}

SEC("socket")
__description("check corrupted spill/fill, MSB")
__success __failure_unpriv __msg_unpriv("attempt to corrupt spilled")
__retval(POINTER_VALUE)
__naked void check_corrupted_spill_fill_msb(void)
{
	asm volatile ("					\
	*(u64*)(r10 - 8) = r1;				\
	r0 = 0x12345678;				\
	*(u32*)(r10 - 4) = r0;				\
	r0 = *(u64*)(r10 - 8);				\
	exit;						\
"	::: __clobber_all);
}

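/* For packet accesses the exact spilled value matters: a u32 spill of the
 * constant 20, filled back with a matching u32 load, keeps r4 known-equal
 * to 20, so after r0 += r4 the r0 > r3 check proves 20 bytes of packet
 * headroom for the read at r2.
 */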
SEC("tc")
__description("Spill and refill a u32 const scalar. Offset to skb->data")
__success __retval(0)
__naked void scalar_offset_to_skb_data_1(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	w4 = 20;					\
	*(u32*)(r10 - 8) = r4;				\
	r4 = *(u32*)(r10 - 8);				\
	r0 = r2;					\
	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=20 */	\
	r0 += r4;					\
	/* if (r0 > r3) R0=pkt,off=20 R2=pkt R3=pkt_end R4=20 */\
	if r0 > r3 goto l0_%=;				\
	/* r0 = *(u32 *)r2 R0=pkt,off=20,r=20 R2=pkt,r=20 R3=pkt_end R4=20 */\
	r0 = *(u32*)(r2 + 0);				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

SEC("socket")
__description("Spill a u32 const, refill from another half of the uninit u32 from the stack")
/* in privileged mode reads from uninitialized stack locations are permitted */
__success __failure_unpriv
__msg_unpriv("invalid read from stack off -4+0 size 4")
__retval(0)
__naked void uninit_u32_from_the_stack(void)
{
	asm volatile ("					\
	w4 = 20;					\
	*(u32*)(r10 - 8) = r4;				\
	/* r4 = *(u32 *)(r10 -4) fp-8=????rrrr*/	\
	r4 = *(u32*)(r10 - 4);				\
	r0 = 0;						\
	exit;						\
"	::: __clobber_all);
}

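/* Filling with a narrower load than the spill loses the known value: the
 * u16 fill of the spilled 20 only yields umax=65535, so the bounds check
 * can no longer prove the packet access in range.
 */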
SEC("tc")
__description("Spill a u32 const scalar. Refill as u16. Offset to skb->data")
__failure __msg("invalid access to packet")
__naked void u16_offset_to_skb_data(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	w4 = 20;					\
	*(u32*)(r10 - 8) = r4;				\
	r4 = *(u16*)(r10 - 8);				\
	r0 = r2;					\
	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */\
	r0 += r4;					\
	/* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */\
	if r0 > r3 goto l0_%=;				\
	/* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */\
	r0 = *(u32*)(r2 + 0);				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

SEC("tc")
__description("Spill u32 const scalars. Refill as u64. Offset to skb->data")
__failure __msg("invalid access to packet")
__naked void u64_offset_to_skb_data(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	w6 = 0;						\
	w7 = 20;					\
	*(u32*)(r10 - 4) = r6;				\
	*(u32*)(r10 - 8) = r7;				\
	r4 = *(u64*)(r10 - 8);				\
	r0 = r2;					\
	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */\
	r0 += r4;					\
	/* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */\
	if r0 > r3 goto l0_%=;				\
	/* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */\
	r0 = *(u32*)(r2 + 0);				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

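/* An offset fill is just as lossy: the u16 load at fp-6 reads the upper
 * half of the u32 spilled at fp-8, not the spilled constant itself.
 */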
SEC("tc")
__description("Spill a u32 const scalar. Refill as u16 from fp-6. Offset to skb->data")
__failure __msg("invalid access to packet")
__naked void _6_offset_to_skb_data(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	w4 = 20;					\
	*(u32*)(r10 - 8) = r4;				\
	r4 = *(u16*)(r10 - 6);				\
	r0 = r2;					\
	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=65535 */\
	r0 += r4;					\
	/* if (r0 > r3) R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */\
	if r0 > r3 goto l0_%=;				\
	/* r0 = *(u32 *)r2 R0=pkt,umax=65535 R2=pkt R3=pkt_end R4=umax=65535 */\
	r0 = *(u32*)(r2 + 0);				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

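/* Spill slots at stack offsets that are not 8-byte aligned (fp-4 here)
 * were not tracked precisely when this test was written, so the refilled
 * r4 is only known as umax=U32_MAX and the packet access must be
 * rejected.
 */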
SEC("tc")
__description("Spill and refill a u32 const scalar at non 8byte aligned stack addr. Offset to skb->data")
__failure __msg("invalid access to packet")
__naked void addr_offset_to_skb_data(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	w4 = 20;					\
	*(u32*)(r10 - 8) = r4;				\
	*(u32*)(r10 - 4) = r4;				\
	r4 = *(u32*)(r10 - 4);				\
	r0 = r2;					\
	/* r0 += r4 R0=pkt R2=pkt R3=pkt_end R4=umax=U32_MAX */\
	r0 += r4;					\
	/* if (r0 > r3) R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4=umax=U32_MAX */\
	if r0 > r3 goto l0_%=;				\
	/* r0 = *(u32 *)r2 R0=pkt,umax=U32_MAX R2=pkt R3=pkt_end R4=umax=U32_MAX */\
	r0 = *(u32*)(r2 + 0);				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end))
	: __clobber_all);
}

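/* Precision is not limited to constants: a scalar bounded to umax=40
 * keeps that bound across a u32 spill/fill, and the r2 > r3 check (with
 * r2 = r0 + 20) then proves the 4-byte read at r0 safe.
 */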
SEC("tc")
__description("Spill and refill a umax=40 bounded scalar. Offset to skb->data")
__success __retval(0)
__naked void scalar_offset_to_skb_data_2(void)
{
	asm volatile ("					\
	r2 = *(u32*)(r1 + %[__sk_buff_data]);		\
	r3 = *(u32*)(r1 + %[__sk_buff_data_end]);	\
	r4 = *(u64*)(r1 + %[__sk_buff_tstamp]);		\
	if r4 <= 40 goto l0_%=;				\
	r0 = 0;						\
	exit;						\
l0_%=:	/* *(u32 *)(r10 -8) = r4 R4=umax=40 */		\
	*(u32*)(r10 - 8) = r4;				\
	/* r4 = *(u32 *)(r10 - 8) */			\
	r4 = *(u32*)(r10 - 8);				\
	/* r2 += r4 R2=pkt R4=umax=40 */		\
	r2 += r4;					\
	/* r0 = r2 R2=pkt,umax=40 R4=umax=40 */		\
	r0 = r2;					\
	/* r2 += 20 R0=pkt,umax=40 R2=pkt,umax=40 */	\
	r2 += 20;					\
	/* if (r2 > r3) R0=pkt,umax=40 R2=pkt,off=20,umax=40 */\
	if r2 > r3 goto l1_%=;				\
	/* r0 = *(u32 *)r0 R0=pkt,r=20,umax=40 R2=pkt,off=20,r=20,umax=40 */\
	r0 = *(u32*)(r0 + 0);				\
l1_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
	  __imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
	  __imm_const(__sk_buff_tstamp, offsetof(struct __sk_buff, tstamp))
	: __clobber_all);
}

SEC("tc")
__description("Spill a u32 scalar at fp-4 and then at fp-8")
__success __retval(0)
__naked void and_then_at_fp_8(void)
{
	asm volatile ("					\
	w4 = 4321;					\
	*(u32*)(r10 - 4) = r4;				\
	*(u32*)(r10 - 8) = r4;				\
	r4 = *(u64*)(r10 - 8);				\
	r0 = 0;						\
	exit;						\
"	::: __clobber_all);
}

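/* The two XDP tests below verify that a spill narrower than the source
 * register drops the register's ID. If the ID leaked into the slot, the
 * comparison after the fill would let find_equal_scalars() tighten the
 * original register's bounds based on only its low bits.
 */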
SEC("xdp")
__description("32-bit spill of 64-bit reg should clear ID")
__failure __msg("math between ctx pointer and 4294967295 is not allowed")
__naked void spill_32bit_of_64bit_fail(void)
{
	asm volatile ("					\
	r6 = r1;					\
	/* Roll one bit to force the verifier to track both branches. */\
	call %[bpf_get_prandom_u32];			\
	r0 &= 0x8;					\
	/* Put a large number into r1. */		\
	r1 = 0xffffffff;				\
	r1 <<= 32;					\
	r1 += r0;					\
	/* Assign an ID to r1. */			\
	r2 = r1;					\
	/* 32-bit spill r1 to stack - should clear the ID! */\
	*(u32*)(r10 - 8) = r1;				\
	/* 32-bit fill r2 from stack. */		\
	r2 = *(u32*)(r10 - 8);				\
	/* Compare r2 with another register to trigger find_equal_scalars.\
	 * Having one random bit is important here, otherwise the verifier\
	 * cuts corners. If the ID was mistakenly preserved on spill, this\
	 * would cause the verifier to think that r1 is also equal to zero\
	 * in one of the branches, and equal to eight on the other branch.\
	 */						\
	r3 = 0;						\
	if r2 != r3 goto l0_%=;				\
l0_%=:	r1 >>= 32;					\
	/* At this point, if the verifier thinks that r1 is 0, an\
	 * out-of-bounds read will happen, because it actually contains\
	 * 0xffffffff.					\
	 */						\
	r6 += r1;					\
	r0 = *(u32*)(r6 + 0);				\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}

SEC("xdp")
__description("16-bit spill of 32-bit reg should clear ID")
__failure __msg("dereference of modified ctx ptr R6 off=65535 disallowed")
__naked void spill_16bit_of_32bit_fail(void)
{
	asm volatile ("					\
	r6 = r1;					\
	/* Roll one bit to force the verifier to track both branches. */\
	call %[bpf_get_prandom_u32];			\
	r0 &= 0x8;					\
	/* Put a large number into r1. */		\
	w1 = 0xffff0000;				\
	r1 += r0;					\
	/* Assign an ID to r1. */			\
	r2 = r1;					\
	/* 16-bit spill r1 to stack - should clear the ID! */\
	*(u16*)(r10 - 8) = r1;				\
	/* 16-bit fill r2 from stack. */		\
	r2 = *(u16*)(r10 - 8);				\
	/* Compare r2 with another register to trigger find_equal_scalars.\
	 * Having one random bit is important here, otherwise the verifier\
	 * cuts corners. If the ID was mistakenly preserved on spill, this\
	 * would cause the verifier to think that r1 is also equal to zero\
	 * in one of the branches, and equal to eight on the other branch.\
	 */						\
	r3 = 0;						\
	if r2 != r3 goto l0_%=;				\
l0_%=:	r1 >>= 16;					\
	/* At this point, if the verifier thinks that r1 is 0, an\
	 * out-of-bounds read will happen, because it actually contains\
	 * 0xffff.					\
	 */						\
	r6 += r1;					\
	r0 = *(u32*)(r6 + 0);				\
	exit;						\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}

char _license[] SEC("license") = "GPL";