1*82887c25SEduard Zingerman // SPDX-License-Identifier: GPL-2.0
2*82887c25SEduard Zingerman /* Converted from tools/testing/selftests/bpf/verifier/unpriv.c */
3*82887c25SEduard Zingerman
4*82887c25SEduard Zingerman #include <linux/bpf.h>
5*82887c25SEduard Zingerman #include <bpf/bpf_helpers.h>
6*82887c25SEduard Zingerman #include "../../../include/linux/filter.h"
7*82887c25SEduard Zingerman #include "bpf_misc.h"
8*82887c25SEduard Zingerman
/* Zero out a stack area used as a struct bpf_sock_tuple and invoke the
 * given socket-lookup helper as func(ctx, &tuple, sizeof tuple, 0, 0).
 * Assumes the caller still has the program context in r1; clobbers
 * r2-r5, and the helper's result (socket pointer or NULL) is in r0.
 */
#define BPF_SK_LOOKUP(func) \
	/* struct bpf_sock_tuple tuple = {} */ \
	"r2 = 0;" \
	"*(u32*)(r10 - 8) = r2;" \
	"*(u64*)(r10 - 16) = r2;" \
	"*(u64*)(r10 - 24) = r2;" \
	"*(u64*)(r10 - 32) = r2;" \
	"*(u64*)(r10 - 40) = r2;" \
	"*(u64*)(r10 - 48) = r2;" \
	/* sk = func(ctx, &tuple, sizeof tuple, 0, 0) */ \
	"r2 = r10;" \
	"r2 += -48;" \
	"r3 = %[sizeof_bpf_sock_tuple];"\
	"r4 = 0;" \
	"r5 = 0;" \
	"call %[" #func "];"
25*82887c25SEduard Zingerman
/* Single-entry hash map with 8-byte key and 8-byte value, used below as
 * a generic target for bpf_map_lookup_elem/bpf_map_update_elem calls.
 */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, long long);
	__type(value, long long);
} map_hash_8b SEC(".maps");
32*82887c25SEduard Zingerman
/* Forward declarations of the auxiliary programs stored in the prog
 * array below (defined later in this file).
 */
void dummy_prog_42_socket(void);
void dummy_prog_24_socket(void);
void dummy_prog_loop1_socket(void);

/* Program array used as a bpf_tail_call target; slot 3 is left unset. */
struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 4);
	__uint(key_size, sizeof(int));
	__array(values, void (void));
} map_prog1_socket SEC(".maps") = {
	.values = {
		[0] = (void *)&dummy_prog_42_socket,
		[1] = (void *)&dummy_prog_loop1_socket,
		[2] = (void *)&dummy_prog_24_socket,
	},
};
49*82887c25SEduard Zingerman
/* Auxiliary tail-call target: simply returns 42. */
SEC("socket")
__auxiliary __auxiliary_unpriv
__naked void dummy_prog_42_socket(void)
{
	asm volatile ("r0 = 42; exit;");
}
56*82887c25SEduard Zingerman
/* Auxiliary tail-call target: simply returns 24. */
SEC("socket")
__auxiliary __auxiliary_unpriv
__naked void dummy_prog_24_socket(void)
{
	asm volatile ("r0 = 24; exit;");
}
63*82887c25SEduard Zingerman
/* Auxiliary tail-call target that tail-calls slot 1 of map_prog1_socket
 * (i.e. itself); returns 41 if the tail call fails.
 */
SEC("socket")
__auxiliary __auxiliary_unpriv
__naked void dummy_prog_loop1_socket(void)
{
	asm volatile ("					\
	r3 = 1;						\
	r2 = %[map_prog1_socket] ll;			\
	call %[bpf_tail_call];				\
	r0 = 41;					\
	exit;						\
"	:
	: __imm(bpf_tail_call),
	  __imm_addr(map_prog1_socket)
	: __clobber_all);
}
79*82887c25SEduard Zingerman
/* Returning the frame pointer in r0 is accepted for privileged users
 * (retval is the pointer value) but must be rejected for unprivileged
 * users as an address leak ("R0 leaks addr").
 */
SEC("socket")
__description("unpriv: return pointer")
__success __failure_unpriv __msg_unpriv("R0 leaks addr")
__retval(POINTER_VALUE)
__naked void unpriv_return_pointer(void)
{
	asm volatile ("					\
	r0 = r10;					\
	exit;						\
"	::: __clobber_all);
}
91*82887c25SEduard Zingerman
/* Adding a constant to the ctx pointer in r1 without leaking it is
 * permitted for both privileged and unprivileged users.
 */
SEC("socket")
__description("unpriv: add const to pointer")
__success __success_unpriv __retval(0)
__naked void unpriv_add_const_to_pointer(void)
{
	asm volatile ("					\
	r1 += 8;					\
	r0 = 0;						\
	exit;						\
"	::: __clobber_all);
}
103*82887c25SEduard Zingerman
/* Pointer + pointer arithmetic (ctx += fp) is rejected for everyone
 * with "R1 pointer += pointer".
 */
SEC("socket")
__description("unpriv: add pointer to pointer")
__failure __msg("R1 pointer += pointer")
__failure_unpriv
__naked void unpriv_add_pointer_to_pointer(void)
{
	asm volatile ("					\
	r1 += r10;					\
	r0 = 0;						\
	exit;						\
"	::: __clobber_all);
}
116*82887c25SEduard Zingerman
/* Negating a pointer is allowed when privileged but flagged as
 * "R1 pointer arithmetic" for unprivileged users.
 */
SEC("socket")
__description("unpriv: neg pointer")
__success __failure_unpriv __msg_unpriv("R1 pointer arithmetic")
__retval(0)
__naked void unpriv_neg_pointer(void)
{
	asm volatile ("					\
	r1 = -r1;					\
	r0 = 0;						\
	exit;						\
"	::: __clobber_all);
}
129*82887c25SEduard Zingerman
/* Comparing the ctx pointer against a constant leaks pointer info;
 * rejected for unprivileged users with "R1 pointer comparison".
 */
SEC("socket")
__description("unpriv: cmp pointer with const")
__success __failure_unpriv __msg_unpriv("R1 pointer comparison")
__retval(0)
__naked void unpriv_cmp_pointer_with_const(void)
{
	asm volatile ("					\
	if r1 == 0 goto l0_%=;				\
l0_%=:	r0 = 0;						\
	exit;						\
"	::: __clobber_all);
}
142*82887c25SEduard Zingerman
/* Comparing two pointers (ctx vs frame pointer) is likewise flagged
 * for unprivileged users ("R10 pointer comparison").
 */
SEC("socket")
__description("unpriv: cmp pointer with pointer")
__success __failure_unpriv __msg_unpriv("R10 pointer comparison")
__retval(0)
__naked void unpriv_cmp_pointer_with_pointer(void)
{
	asm volatile ("					\
	if r1 == r10 goto l0_%=;			\
l0_%=:	r0 = 0;						\
	exit;						\
"	::: __clobber_all);
}
155*82887c25SEduard Zingerman
/* bpf_trace_printk from a tracepoint program: builds an 8-byte buffer
 * on the stack and calls the helper. Expected to load successfully in
 * this (privileged) test setup; the unprivileged case is covered by the
 * test runner's program-type restrictions, not a verifier message.
 * NOTE(review): the description says "disallowed" but only __success is
 * asserted here — confirm intent against the original verifier/unpriv.c.
 */
SEC("tracepoint")
__description("unpriv: check that printk is disallowed")
__success
__naked void check_that_printk_is_disallowed(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r1 = r10;					\
	r1 += -8;					\
	r2 = 8;						\
	r3 = r1;					\
	call %[bpf_trace_printk];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_trace_printk)
	: __clobber_all);
}
175*82887c25SEduard Zingerman
/* Passing a stack pointer as the helper's value argument (r4 of
 * bpf_map_update_elem) is fine when privileged, but leaks an address
 * for unprivileged users ("R4 leaks addr").
 */
SEC("socket")
__description("unpriv: pass pointer to helper function")
__success __failure_unpriv __msg_unpriv("R4 leaks addr")
__retval(0)
__naked void pass_pointer_to_helper_function(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	r3 = r2;					\
	r4 = r2;					\
	call %[bpf_map_update_elem];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_update_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}
198*82887c25SEduard Zingerman
/* A spilled pointer sits in the stack slot that is then used as a map
 * key; unprivileged programs may not let a helper read a stack slot
 * holding a pointer ("invalid indirect read from stack").
 */
SEC("socket")
__description("unpriv: indirectly pass pointer on stack to helper function")
__success __failure_unpriv
__msg_unpriv("invalid indirect read from stack R2 off -8+0 size 8")
__retval(0)
__naked void on_stack_to_helper_function(void)
{
	asm volatile ("					\
	*(u64*)(r10 - 8) = r10;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}
219*82887c25SEduard Zingerman
/* Partially overwriting a spilled pointer with a 4-byte scalar store
 * must be rejected for unprivileged users ("attempt to corrupt
 * spilled") since it could forge a pointer.
 */
SEC("socket")
__description("unpriv: mangle pointer on stack 1")
__success __failure_unpriv __msg_unpriv("attempt to corrupt spilled")
__retval(0)
__naked void mangle_pointer_on_stack_1(void)
{
	asm volatile ("					\
	*(u64*)(r10 - 8) = r10;				\
	r0 = 0;						\
	*(u32*)(r10 - 8) = r0;				\
	r0 = 0;						\
	exit;						\
"	::: __clobber_all);
}
234*82887c25SEduard Zingerman
/* Same as above but corrupting only the top byte of the spilled
 * pointer via a u8 store — still rejected for unprivileged users.
 */
SEC("socket")
__description("unpriv: mangle pointer on stack 2")
__success __failure_unpriv __msg_unpriv("attempt to corrupt spilled")
__retval(0)
__naked void mangle_pointer_on_stack_2(void)
{
	asm volatile ("					\
	*(u64*)(r10 - 8) = r10;				\
	r0 = 0;						\
	*(u8*)(r10 - 1) = r0;				\
	r0 = 0;						\
	exit;						\
"	::: __clobber_all);
}
249*82887c25SEduard Zingerman
/* Reading half of a 64-bit spilled pointer with a u32 load is rejected
 * for everyone ("invalid size") — pointer spills must be filled whole.
 */
SEC("socket")
__description("unpriv: read pointer from stack in small chunks")
__failure __msg("invalid size")
__failure_unpriv
__naked void from_stack_in_small_chunks(void)
{
	asm volatile ("					\
	*(u64*)(r10 - 8) = r10;				\
	r0 = *(u32*)(r10 - 8);				\
	r0 = 0;						\
	exit;						\
"	::: __clobber_all);
}
263*82887c25SEduard Zingerman
/* Storing a pointer into the socket ctx: invalid ctx access when
 * privileged, and additionally an address leak ("R1 leaks addr") when
 * unprivileged.
 */
SEC("socket")
__description("unpriv: write pointer into ctx")
__failure __msg("invalid bpf_context access")
__failure_unpriv __msg_unpriv("R1 leaks addr")
__naked void unpriv_write_pointer_into_ctx(void)
{
	asm volatile ("					\
	*(u64*)(r1 + 0) = r1;				\
	r0 = 0;						\
	exit;						\
"	::: __clobber_all);
}
276*82887c25SEduard Zingerman
/* A plain spill and fill of the ctx pointer through the stack is legal
 * for both privileged and unprivileged users.
 */
SEC("socket")
__description("unpriv: spill/fill of ctx")
__success __success_unpriv __retval(0)
__naked void unpriv_spill_fill_of_ctx(void)
{
	asm volatile ("					\
	r6 = r10;					\
	r6 += -8;					\
	*(u64*)(r6 + 0) = r1;				\
	r1 = *(u64*)(r6 + 0);				\
	r0 = 0;						\
	exit;						\
"	::: __clobber_all);
}
291*82887c25SEduard Zingerman
/* The ctx pointer filled back from the stack must still be usable as a
 * ctx argument to a helper (bpf_get_hash_recalc).
 */
SEC("tc")
__description("unpriv: spill/fill of ctx 2")
__success __retval(0)
__naked void spill_fill_of_ctx_2(void)
{
	asm volatile ("					\
	r6 = r10;					\
	r6 += -8;					\
	*(u64*)(r6 + 0) = r1;				\
	r1 = *(u64*)(r6 + 0);				\
	call %[bpf_get_hash_recalc];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_get_hash_recalc)
	: __clobber_all);
}
309*82887c25SEduard Zingerman
/* The second store overwrites the spilled ctx with the frame pointer,
 * so the fill yields fp instead of ctx; the helper call must fail with
 * "R1 type=fp expected=ctx".
 */
SEC("tc")
__description("unpriv: spill/fill of ctx 3")
__failure __msg("R1 type=fp expected=ctx")
__naked void spill_fill_of_ctx_3(void)
{
	asm volatile ("					\
	r6 = r10;					\
	r6 += -8;					\
	*(u64*)(r6 + 0) = r1;				\
	*(u64*)(r6 + 0) = r10;				\
	r1 = *(u64*)(r6 + 0);				\
	call %[bpf_get_hash_recalc];			\
	exit;						\
"	:
	: __imm(bpf_get_hash_recalc)
	: __clobber_all);
}
327*82887c25SEduard Zingerman
/* An atomic add on the slot holding the spilled ctx turns its contents
 * into a scalar; the subsequent fill + helper call must fail with
 * "R1 type=scalar expected=ctx".
 */
SEC("tc")
__description("unpriv: spill/fill of ctx 4")
__failure __msg("R1 type=scalar expected=ctx")
__naked void spill_fill_of_ctx_4(void)
{
	asm volatile ("					\
	r6 = r10;					\
	r6 += -8;					\
	*(u64*)(r6 + 0) = r1;				\
	r0 = 1;						\
	lock *(u64 *)(r10 - 8) += r0;			\
	r1 = *(u64*)(r6 + 0);				\
	call %[bpf_get_hash_recalc];			\
	exit;						\
"	:
	: __imm(bpf_get_hash_recalc)
	: __clobber_all);
}
346*82887c25SEduard Zingerman
/* Depending on the runtime branch, the filled r1 is either a stack
 * pointer or the ctx pointer, so the single STX write through r1 would
 * need two different fixups — rejected with "same insn cannot be used
 * with different pointers".
 */
SEC("tc")
__description("unpriv: spill/fill of different pointers stx")
__failure __msg("same insn cannot be used with different pointers")
__naked void fill_of_different_pointers_stx(void)
{
	asm volatile ("					\
	r3 = 42;					\
	r6 = r10;					\
	r6 += -8;					\
	if r1 == 0 goto l0_%=;				\
	r2 = r10;					\
	r2 += -16;					\
	*(u64*)(r6 + 0) = r2;				\
l0_%=:	if r1 != 0 goto l1_%=;				\
	*(u64*)(r6 + 0) = r1;				\
l1_%=:	r1 = *(u64*)(r6 + 0);				\
	*(u32*)(r1 + %[__sk_buff_mark]) = r3;		\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
	: __clobber_all);
}
370*82887c25SEduard Zingerman
/* Same as above, but use BPF_ST_MEM to save 42
 * instead of BPF_STX_MEM (the raw insn is injected via .8byte since
 * there is no asm mnemonic for it here).
 */
SEC("tc")
__description("unpriv: spill/fill of different pointers st")
__failure __msg("same insn cannot be used with different pointers")
__naked void fill_of_different_pointers_st(void)
{
	asm volatile ("					\
	r6 = r10;					\
	r6 += -8;					\
	if r1 == 0 goto l0_%=;				\
	r2 = r10;					\
	r2 += -16;					\
	*(u64*)(r6 + 0) = r2;				\
l0_%=:	if r1 != 0 goto l1_%=;				\
	*(u64*)(r6 + 0) = r1;				\
l1_%=:	r1 = *(u64*)(r6 + 0);				\
	.8byte %[st_mem];				\
	r0 = 0;						\
	exit;						\
"	:
	: __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
	  __imm_insn(st_mem,
		     BPF_ST_MEM(BPF_W, BPF_REG_1, offsetof(struct __sk_buff, mark), 42))
	: __clobber_all);
}
398*82887c25SEduard Zingerman
/* Spill either the acquired sock pointer or the ctx pointer into the
 * same slot, fill it, then treat the result as a ctx for a skb write
 * and as a sock for bpf_sk_release. The release on a ctx-typed value
 * fails with "type=ctx expected=sock".
 */
SEC("tc")
__description("unpriv: spill/fill of different pointers stx - ctx and sock")
__failure __msg("type=ctx expected=sock")
__naked void pointers_stx_ctx_and_sock(void)
{
	asm volatile ("					\
	r8 = r1;					\
	/* struct bpf_sock *sock = bpf_sock_lookup(...); */\
"	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r2 = r0;					\
	/* u64 foo; */					\
	/* void *target = &foo; */			\
	r6 = r10;					\
	r6 += -8;					\
	r1 = r8;					\
	/* if (skb == NULL) *target = sock; */		\
	if r1 == 0 goto l0_%=;				\
	*(u64*)(r6 + 0) = r2;				\
l0_%=:	/* else *target = skb; */			\
	if r1 != 0 goto l1_%=;				\
	*(u64*)(r6 + 0) = r1;				\
l1_%=:	/* struct __sk_buff *skb = *target; */		\
	r1 = *(u64*)(r6 + 0);				\
	/* skb->mark = 42; */				\
	r3 = 42;					\
	*(u32*)(r1 + %[__sk_buff_mark]) = r3;		\
	/* if (sk) bpf_sk_release(sk) */		\
	if r1 == 0 goto l2_%=;				\
	call %[bpf_sk_release];				\
l2_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
437*82887c25SEduard Zingerman
/* Variant of the previous test with no bpf_sk_release at all: the
 * acquired socket reference is never dropped, so the verifier reports
 * "Unreleased reference" (the commented errstr shows the alternative
 * rejection the original verifier/unpriv.c test expected).
 */
SEC("tc")
__description("unpriv: spill/fill of different pointers stx - leak sock")
__failure
//.errstr = "same insn cannot be used with different pointers",
__msg("Unreleased reference")
__naked void different_pointers_stx_leak_sock(void)
{
	asm volatile ("					\
	r8 = r1;					\
	/* struct bpf_sock *sock = bpf_sock_lookup(...); */\
"	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r2 = r0;					\
	/* u64 foo; */					\
	/* void *target = &foo; */			\
	r6 = r10;					\
	r6 += -8;					\
	r1 = r8;					\
	/* if (skb == NULL) *target = sock; */		\
	if r1 == 0 goto l0_%=;				\
	*(u64*)(r6 + 0) = r2;				\
l0_%=:	/* else *target = skb; */			\
	if r1 != 0 goto l1_%=;				\
	*(u64*)(r6 + 0) = r1;				\
l1_%=:	/* struct __sk_buff *skb = *target; */		\
	r1 = *(u64*)(r6 + 0);				\
	/* skb->mark = 42; */				\
	r3 = 42;					\
	*(u32*)(r1 + %[__sk_buff_mark]) = r3;		\
	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
473*82887c25SEduard Zingerman
/* Branches spill either skb (ctx) or the acquired sock into the same
 * slot; the fill is then read through as a sock. The single load insn
 * would need two different fixups, so the verifier rejects it with
 * "same insn cannot be used with different pointers".
 */
SEC("tc")
__description("unpriv: spill/fill of different pointers stx - sock and ctx (read)")
__failure __msg("same insn cannot be used with different pointers")
__naked void stx_sock_and_ctx_read(void)
{
	asm volatile ("					\
	r8 = r1;					\
	/* struct bpf_sock *sock = bpf_sock_lookup(...); */\
"	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r2 = r0;					\
	/* u64 foo; */					\
	/* void *target = &foo; */			\
	r6 = r10;					\
	r6 += -8;					\
	r1 = r8;					\
	/* if (skb) *target = skb */			\
	if r1 == 0 goto l0_%=;				\
	*(u64*)(r6 + 0) = r1;				\
l0_%=:	/* else *target = sock */			\
	if r1 != 0 goto l1_%=;				\
	*(u64*)(r6 + 0) = r2;				\
l1_%=:	/* struct bpf_sock *sk = *target; */		\
	r1 = *(u64*)(r6 + 0);				\
	/* if (sk) u32 foo = sk->mark; bpf_sk_release(sk); */\
	if r1 == 0 goto l2_%=;				\
	r3 = *(u32*)(r1 + %[bpf_sock_mark]);		\
	call %[bpf_sk_release];				\
l2_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(bpf_sock_mark, offsetof(struct bpf_sock, mark)),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
510*82887c25SEduard Zingerman
/* Same spill/fill ambiguity as the read variant, but the filled value
 * is written through as a sock; writing sk->mark is rejected with
 * "cannot write into sock" (the commented errstr shows the alternative
 * rejection the original verifier/unpriv.c test expected).
 */
SEC("tc")
__description("unpriv: spill/fill of different pointers stx - sock and ctx (write)")
__failure
//.errstr = "same insn cannot be used with different pointers",
__msg("cannot write into sock")
__naked void stx_sock_and_ctx_write(void)
{
	asm volatile ("					\
	r8 = r1;					\
	/* struct bpf_sock *sock = bpf_sock_lookup(...); */\
"	BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
"	r2 = r0;					\
	/* u64 foo; */					\
	/* void *target = &foo; */			\
	r6 = r10;					\
	r6 += -8;					\
	r1 = r8;					\
	/* if (skb) *target = skb */			\
	if r1 == 0 goto l0_%=;				\
	*(u64*)(r6 + 0) = r1;				\
l0_%=:	/* else *target = sock */			\
	if r1 != 0 goto l1_%=;				\
	*(u64*)(r6 + 0) = r2;				\
l1_%=:	/* struct bpf_sock *sk = *target; */		\
	r1 = *(u64*)(r6 + 0);				\
	/* if (sk) sk->mark = 42; bpf_sk_release(sk); */\
	if r1 == 0 goto l2_%=;				\
	r3 = 42;					\
	*(u32*)(r1 + %[bpf_sock_mark]) = r3;		\
	call %[bpf_sk_release];				\
l2_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_sk_lookup_tcp),
	  __imm(bpf_sk_release),
	  __imm_const(bpf_sock_mark, offsetof(struct bpf_sock, mark)),
	  __imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
	: __clobber_all);
}
550*82887c25SEduard Zingerman
/* Storing a map-value pointer into the map value itself is fine when
 * privileged but leaks an address for unprivileged users
 * ("R0 leaks addr").
 */
SEC("socket")
__description("unpriv: write pointer into map elem value")
__success __failure_unpriv __msg_unpriv("R0 leaks addr")
__retval(0)
__naked void pointer_into_map_elem_value(void)
{
	asm volatile ("					\
	r1 = 0;						\
	*(u64*)(r10 - 8) = r1;				\
	r2 = r10;					\
	r2 += -8;					\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	if r0 == 0 goto l0_%=;				\
	*(u64*)(r0 + 0) = r0;				\
l0_%=:	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}
572*82887c25SEduard Zingerman
/* 32-bit ALU ops produce a scalar in r7; the guarded dereference is
 * dead at runtime (w7 & 1 == 0), but unprivileged verification still
 * walks the branch and rejects the scalar dereference
 * ("R7 invalid mem access 'scalar'").
 */
SEC("socket")
__description("alu32: mov u32 const")
__success __failure_unpriv __msg_unpriv("R7 invalid mem access 'scalar'")
__retval(0)
__naked void alu32_mov_u32_const(void)
{
	asm volatile ("					\
	w7 = 0;						\
	w7 &= 1;					\
	w0 = w7;					\
	if r0 == 0 goto l0_%=;				\
	r0 = *(u64*)(r7 + 0);				\
l0_%=:	exit;						\
"	::: __clobber_all);
}
588*82887c25SEduard Zingerman
/* A 32-bit move of the frame pointer copies only its low half — an
 * unprivileged user must not obtain partial pointer bits
 * ("R10 partial copy").
 */
SEC("socket")
__description("unpriv: partial copy of pointer")
__success __failure_unpriv __msg_unpriv("R10 partial copy")
__retval(0)
__naked void unpriv_partial_copy_of_pointer(void)
{
	asm volatile ("					\
	w1 = w10;					\
	r0 = 0;						\
	exit;						\
"	::: __clobber_all);
}
601*82887c25SEduard Zingerman
602*82887c25SEduard Zingerman SEC("socket")
603*82887c25SEduard Zingerman __description("unpriv: pass pointer to tail_call")
604*82887c25SEduard Zingerman __success __failure_unpriv __msg_unpriv("R3 leaks addr into helper")
605*82887c25SEduard Zingerman __retval(0)
pass_pointer_to_tail_call(void)606*82887c25SEduard Zingerman __naked void pass_pointer_to_tail_call(void)
607*82887c25SEduard Zingerman {
608*82887c25SEduard Zingerman asm volatile (" \
609*82887c25SEduard Zingerman r3 = r1; \
610*82887c25SEduard Zingerman r2 = %[map_prog1_socket] ll; \
611*82887c25SEduard Zingerman call %[bpf_tail_call]; \
612*82887c25SEduard Zingerman r0 = 0; \
613*82887c25SEduard Zingerman exit; \
614*82887c25SEduard Zingerman " :
615*82887c25SEduard Zingerman : __imm(bpf_tail_call),
616*82887c25SEduard Zingerman __imm_addr(map_prog1_socket)
617*82887c25SEduard Zingerman : __clobber_all);
618*82887c25SEduard Zingerman }
619*82887c25SEduard Zingerman
620*82887c25SEduard Zingerman SEC("socket")
621*82887c25SEduard Zingerman __description("unpriv: cmp map pointer with zero")
622*82887c25SEduard Zingerman __success __failure_unpriv __msg_unpriv("R1 pointer comparison")
623*82887c25SEduard Zingerman __retval(0)
cmp_map_pointer_with_zero(void)624*82887c25SEduard Zingerman __naked void cmp_map_pointer_with_zero(void)
625*82887c25SEduard Zingerman {
626*82887c25SEduard Zingerman asm volatile (" \
627*82887c25SEduard Zingerman r1 = 0; \
628*82887c25SEduard Zingerman r1 = %[map_hash_8b] ll; \
629*82887c25SEduard Zingerman if r1 == 0 goto l0_%=; \
630*82887c25SEduard Zingerman l0_%=: r0 = 0; \
631*82887c25SEduard Zingerman exit; \
632*82887c25SEduard Zingerman " :
633*82887c25SEduard Zingerman : __imm_addr(map_hash_8b)
634*82887c25SEduard Zingerman : __clobber_all);
635*82887c25SEduard Zingerman }
636*82887c25SEduard Zingerman
637*82887c25SEduard Zingerman SEC("socket")
638*82887c25SEduard Zingerman __description("unpriv: write into frame pointer")
639*82887c25SEduard Zingerman __failure __msg("frame pointer is read only")
640*82887c25SEduard Zingerman __failure_unpriv
unpriv_write_into_frame_pointer(void)641*82887c25SEduard Zingerman __naked void unpriv_write_into_frame_pointer(void)
642*82887c25SEduard Zingerman {
643*82887c25SEduard Zingerman asm volatile (" \
644*82887c25SEduard Zingerman r10 = r1; \
645*82887c25SEduard Zingerman r0 = 0; \
646*82887c25SEduard Zingerman exit; \
647*82887c25SEduard Zingerman " ::: __clobber_all);
648*82887c25SEduard Zingerman }
649*82887c25SEduard Zingerman
650*82887c25SEduard Zingerman SEC("socket")
651*82887c25SEduard Zingerman __description("unpriv: spill/fill frame pointer")
652*82887c25SEduard Zingerman __failure __msg("frame pointer is read only")
653*82887c25SEduard Zingerman __failure_unpriv
unpriv_spill_fill_frame_pointer(void)654*82887c25SEduard Zingerman __naked void unpriv_spill_fill_frame_pointer(void)
655*82887c25SEduard Zingerman {
656*82887c25SEduard Zingerman asm volatile (" \
657*82887c25SEduard Zingerman r6 = r10; \
658*82887c25SEduard Zingerman r6 += -8; \
659*82887c25SEduard Zingerman *(u64*)(r6 + 0) = r10; \
660*82887c25SEduard Zingerman r10 = *(u64*)(r6 + 0); \
661*82887c25SEduard Zingerman r0 = 0; \
662*82887c25SEduard Zingerman exit; \
663*82887c25SEduard Zingerman " ::: __clobber_all);
664*82887c25SEduard Zingerman }
665*82887c25SEduard Zingerman
666*82887c25SEduard Zingerman SEC("socket")
667*82887c25SEduard Zingerman __description("unpriv: cmp of frame pointer")
668*82887c25SEduard Zingerman __success __failure_unpriv __msg_unpriv("R10 pointer comparison")
669*82887c25SEduard Zingerman __retval(0)
unpriv_cmp_of_frame_pointer(void)670*82887c25SEduard Zingerman __naked void unpriv_cmp_of_frame_pointer(void)
671*82887c25SEduard Zingerman {
672*82887c25SEduard Zingerman asm volatile (" \
673*82887c25SEduard Zingerman if r10 == 0 goto l0_%=; \
674*82887c25SEduard Zingerman l0_%=: r0 = 0; \
675*82887c25SEduard Zingerman exit; \
676*82887c25SEduard Zingerman " ::: __clobber_all);
677*82887c25SEduard Zingerman }
678*82887c25SEduard Zingerman
679*82887c25SEduard Zingerman SEC("socket")
680*82887c25SEduard Zingerman __description("unpriv: adding of fp, reg")
681*82887c25SEduard Zingerman __success __failure_unpriv
682*82887c25SEduard Zingerman __msg_unpriv("R1 stack pointer arithmetic goes out of range")
683*82887c25SEduard Zingerman __retval(0)
unpriv_adding_of_fp_reg(void)684*82887c25SEduard Zingerman __naked void unpriv_adding_of_fp_reg(void)
685*82887c25SEduard Zingerman {
686*82887c25SEduard Zingerman asm volatile (" \
687*82887c25SEduard Zingerman r0 = 0; \
688*82887c25SEduard Zingerman r1 = 0; \
689*82887c25SEduard Zingerman r1 += r10; \
690*82887c25SEduard Zingerman *(u64*)(r1 - 8) = r0; \
691*82887c25SEduard Zingerman exit; \
692*82887c25SEduard Zingerman " ::: __clobber_all);
693*82887c25SEduard Zingerman }
694*82887c25SEduard Zingerman
695*82887c25SEduard Zingerman SEC("socket")
696*82887c25SEduard Zingerman __description("unpriv: adding of fp, imm")
697*82887c25SEduard Zingerman __success __failure_unpriv
698*82887c25SEduard Zingerman __msg_unpriv("R1 stack pointer arithmetic goes out of range")
699*82887c25SEduard Zingerman __retval(0)
unpriv_adding_of_fp_imm(void)700*82887c25SEduard Zingerman __naked void unpriv_adding_of_fp_imm(void)
701*82887c25SEduard Zingerman {
702*82887c25SEduard Zingerman asm volatile (" \
703*82887c25SEduard Zingerman r0 = 0; \
704*82887c25SEduard Zingerman r1 = r10; \
705*82887c25SEduard Zingerman r1 += 0; \
706*82887c25SEduard Zingerman *(u64*)(r1 - 8) = r0; \
707*82887c25SEduard Zingerman exit; \
708*82887c25SEduard Zingerman " ::: __clobber_all);
709*82887c25SEduard Zingerman }
710*82887c25SEduard Zingerman
711*82887c25SEduard Zingerman SEC("socket")
712*82887c25SEduard Zingerman __description("unpriv: cmp of stack pointer")
713*82887c25SEduard Zingerman __success __failure_unpriv __msg_unpriv("R2 pointer comparison")
714*82887c25SEduard Zingerman __retval(0)
unpriv_cmp_of_stack_pointer(void)715*82887c25SEduard Zingerman __naked void unpriv_cmp_of_stack_pointer(void)
716*82887c25SEduard Zingerman {
717*82887c25SEduard Zingerman asm volatile (" \
718*82887c25SEduard Zingerman r2 = r10; \
719*82887c25SEduard Zingerman r2 += -8; \
720*82887c25SEduard Zingerman if r2 == 0 goto l0_%=; \
721*82887c25SEduard Zingerman l0_%=: r0 = 0; \
722*82887c25SEduard Zingerman exit; \
723*82887c25SEduard Zingerman " ::: __clobber_all);
724*82887c25SEduard Zingerman }
725*82887c25SEduard Zingerman
726*82887c25SEduard Zingerman char _license[] SEC("license") = "GPL";
727