// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/raw_stack.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

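/* The stack slot at fp-8 is never written: the skb_load_bytes() call is
 * left out on purpose. Privileged programs are allowed to read the
 * uninitialized slot, while unprivileged programs are rejected with
 * "invalid read from stack".
 */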
SEC("socket")
__description("raw_stack: no skb_load_bytes")
__success
__failure_unpriv __msg_unpriv("invalid read from stack R6 off=-8 size=8")
__naked void stack_no_skb_load_bytes(void)
{
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;					\
	r6 += -8;					\
	r3 = r6;					\
	r4 = 8;						\
	/* Call to skb_load_bytes() omitted. */		\
	r0 = *(u64*)(r6 + 0);				\
	exit;						\
"	::: __clobber_all);
}

SEC("tc")
__description("raw_stack: skb_load_bytes, negative len")
__failure __msg("R4 min value is negative")
__naked void skb_load_bytes_negative_len(void)
{
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;					\
	r6 += -8;					\
	r3 = r6;					\
	r4 = -8;					\
	call %[bpf_skb_load_bytes];			\
	r0 = *(u64*)(r6 + 0);				\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes)
	: __clobber_all);
}

SEC("tc")
__description("raw_stack: skb_load_bytes, negative len 2")
__failure __msg("R4 min value is negative")
__naked void load_bytes_negative_len_2(void)
{
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;					\
	r6 += -8;					\
	r3 = r6;					\
	r4 = %[__imm_0];				\
	call %[bpf_skb_load_bytes];			\
	r0 = *(u64*)(r6 + 0);				\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes),
	  __imm_const(__imm_0, ~0)
	: __clobber_all);
}

SEC("tc")
__description("raw_stack: skb_load_bytes, zero len")
__failure __msg("R4 invalid zero-sized read: u64=[0,0]")
__naked void skb_load_bytes_zero_len(void)
{
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;					\
	r6 += -8;					\
	r3 = r6;					\
	r4 = 0;						\
	call %[bpf_skb_load_bytes];			\
	r0 = *(u64*)(r6 + 0);				\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes)
	: __clobber_all);
}

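/* Passing an uninitialized stack buffer to the helper is fine here: the
 * helper itself fills the bytes, so reading the slot back after the call
 * does not trip the uninitialized-stack check.
 */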
SEC("tc")
__description("raw_stack: skb_load_bytes, no init")
__success __retval(0)
__naked void skb_load_bytes_no_init(void)
{
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;					\
	r6 += -8;					\
	r3 = r6;					\
	r4 = 8;						\
	call %[bpf_skb_load_bytes];			\
	r0 = *(u64*)(r6 + 0);				\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes)
	: __clobber_all);
}

SEC("tc")
__description("raw_stack: skb_load_bytes, init")
__success __retval(0)
__naked void stack_skb_load_bytes_init(void)
{
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;					\
	r6 += -8;					\
	r3 = 0xcafe;					\
	*(u64*)(r6 + 0) = r3;				\
	r3 = r6;					\
	r4 = 8;						\
	call %[bpf_skb_load_bytes];			\
	r0 = *(u64*)(r6 + 0);				\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes)
	: __clobber_all);
}

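/* The helper writes only the 8 bytes at [r6, r6 + 8). The ctx pointer
 * spills at r6 - 8 and r6 + 8 lie outside that range, so both survive the
 * call and can still be dereferenced as ctx pointers.
 */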
SEC("tc")
__description("raw_stack: skb_load_bytes, spilled regs around bounds")
__success __retval(0)
__naked void bytes_spilled_regs_around_bounds(void)
{
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;					\
	r6 += -16;					\
	*(u64*)(r6 - 8) = r1;				\
	*(u64*)(r6 + 8) = r1;				\
	r3 = r6;					\
	r4 = 8;						\
	call %[bpf_skb_load_bytes];			\
	r0 = *(u64*)(r6 - 8);				\
	r2 = *(u64*)(r6 + 8);				\
	r0 = *(u32*)(r0 + %[__sk_buff_mark]);		\
	r2 = *(u32*)(r2 + %[__sk_buff_priority]);	\
	r0 += r2;					\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes),
	  __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
	  __imm_const(__sk_buff_priority, offsetof(struct __sk_buff, priority))
	: __clobber_all);
}

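/* Here the spilled ctx pointer sits in the very slot the helper writes to.
 * After the call the slot no longer holds a pointer, so the value loaded
 * back is a plain scalar and dereferencing it is rejected.
 */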
SEC("tc")
__description("raw_stack: skb_load_bytes, spilled regs corruption")
__failure __msg("R0 invalid mem access 'scalar'")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void load_bytes_spilled_regs_corruption(void)
{
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;					\
	r6 += -8;					\
	*(u64*)(r6 + 0) = r1;				\
	r3 = r6;					\
	r4 = 8;						\
	call %[bpf_skb_load_bytes];			\
	r0 = *(u64*)(r6 + 0);				\
	r0 = *(u32*)(r0 + %[__sk_buff_mark]);		\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes),
	  __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
	: __clobber_all);
}

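/* Same idea with three spills: only the middle one at r6 + 0 overlaps the
 * helper's write. The outer spills stay valid ctx pointers, but the value
 * reloaded from the overwritten slot is a scalar, so the R3 dereference
 * fails.
 */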
SEC("tc")
__description("raw_stack: skb_load_bytes, spilled regs corruption 2")
__failure __msg("R3 invalid mem access 'scalar'")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void bytes_spilled_regs_corruption_2(void)
{
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;					\
	r6 += -16;					\
	*(u64*)(r6 - 8) = r1;				\
	*(u64*)(r6 + 0) = r1;				\
	*(u64*)(r6 + 8) = r1;				\
	r3 = r6;					\
	r4 = 8;						\
	call %[bpf_skb_load_bytes];			\
	r0 = *(u64*)(r6 - 8);				\
	r2 = *(u64*)(r6 + 8);				\
	r3 = *(u64*)(r6 + 0);				\
	r0 = *(u32*)(r0 + %[__sk_buff_mark]);		\
	r2 = *(u32*)(r2 + %[__sk_buff_priority]);	\
	r0 += r2;					\
	r3 = *(u32*)(r3 + %[__sk_buff_pkt_type]);	\
	r0 += r3;					\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes),
	  __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
	  __imm_const(__sk_buff_pkt_type, offsetof(struct __sk_buff, pkt_type)),
	  __imm_const(__sk_buff_priority, offsetof(struct __sk_buff, priority))
	: __clobber_all);
}

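/* The layout matches the previous test, but the value reloaded from the
 * overwritten slot is only used as data (added into r0), never
 * dereferenced, so the program is accepted.
 */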
SEC("tc")
__description("raw_stack: skb_load_bytes, spilled regs + data")
__success __retval(0)
__naked void load_bytes_spilled_regs_data(void)
{
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;					\
	r6 += -16;					\
	*(u64*)(r6 - 8) = r1;				\
	*(u64*)(r6 + 0) = r1;				\
	*(u64*)(r6 + 8) = r1;				\
	r3 = r6;					\
	r4 = 8;						\
	call %[bpf_skb_load_bytes];			\
	r0 = *(u64*)(r6 - 8);				\
	r2 = *(u64*)(r6 + 8);				\
	r3 = *(u64*)(r6 + 0);				\
	r0 = *(u32*)(r0 + %[__sk_buff_mark]);		\
	r2 = *(u32*)(r2 + %[__sk_buff_priority]);	\
	r0 += r2;					\
	r0 += r3;					\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes),
	  __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
	  __imm_const(__sk_buff_priority, offsetof(struct __sk_buff, priority))
	: __clobber_all);
}

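/* BPF programs get at most 512 bytes of stack, i.e. offsets fp-512 through
 * fp-1. A buffer starting at fp-513 is one byte below that window, hence
 * the off=-513 rejection; an 8-byte write starting at fp-1 (next test)
 * would run past the top of the frame.
 */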
SEC("tc")
__description("raw_stack: skb_load_bytes, invalid access 1")
__failure __msg("invalid indirect access to stack R3 off=-513 size=8")
__naked void load_bytes_invalid_access_1(void)
{
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;					\
	r6 += -513;					\
	r3 = r6;					\
	r4 = 8;						\
	call %[bpf_skb_load_bytes];			\
	r0 = *(u64*)(r6 + 0);				\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes)
	: __clobber_all);
}

SEC("tc")
__description("raw_stack: skb_load_bytes, invalid access 2")
__failure __msg("invalid indirect access to stack R3 off=-1 size=8")
__naked void load_bytes_invalid_access_2(void)
{
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;					\
	r6 += -1;					\
	r3 = r6;					\
	r4 = 8;						\
	call %[bpf_skb_load_bytes];			\
	r0 = *(u64*)(r6 + 0);				\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes)
	: __clobber_all);
}

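/* 0xffffffff is a 32-bit immediate that is sign-extended to -1 when moved
 * into a 64-bit register, so both the stack offset and the length end up
 * negative; the negative length in R4 is what the verifier reports. The
 * two tests after this one use 0x7fffffff, a length far larger than any
 * stack buffer, which is reported as an unbounded memory access.
 */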
SEC("tc")
__description("raw_stack: skb_load_bytes, invalid access 3")
__failure __msg("R4 min value is negative")
__naked void load_bytes_invalid_access_3(void)
{
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;					\
	r6 += 0xffffffff;				\
	r3 = r6;					\
	r4 = 0xffffffff;				\
	call %[bpf_skb_load_bytes];			\
	r0 = *(u64*)(r6 + 0);				\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes)
	: __clobber_all);
}

SEC("tc")
__description("raw_stack: skb_load_bytes, invalid access 4")
__failure
__msg("R4 unbounded memory access, use 'var &= const' or 'if (var < const)'")
__naked void load_bytes_invalid_access_4(void)
{
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;					\
	r6 += -1;					\
	r3 = r6;					\
	r4 = 0x7fffffff;				\
	call %[bpf_skb_load_bytes];			\
	r0 = *(u64*)(r6 + 0);				\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes)
	: __clobber_all);
}

SEC("tc")
__description("raw_stack: skb_load_bytes, invalid access 5")
__failure
__msg("R4 unbounded memory access, use 'var &= const' or 'if (var < const)'")
__naked void load_bytes_invalid_access_5(void)
{
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;					\
	r6 += -512;					\
	r3 = r6;					\
	r4 = 0x7fffffff;				\
	call %[bpf_skb_load_bytes];			\
	r0 = *(u64*)(r6 + 0);				\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes)
	: __clobber_all);
}

SEC("tc")
__description("raw_stack: skb_load_bytes, invalid access 6")
__failure __msg("invalid zero-sized read")
__naked void load_bytes_invalid_access_6(void)
{
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;					\
	r6 += -512;					\
	r3 = r6;					\
	r4 = 0;						\
	call %[bpf_skb_load_bytes];			\
	r0 = *(u64*)(r6 + 0);				\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes)
	: __clobber_all);
}

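/* 512 bytes starting at fp-512 covers the entire stack frame, the largest
 * buffer that can legally be handed to the helper.
 */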
SEC("tc")
__description("raw_stack: skb_load_bytes, large access")
__success __retval(0)
__naked void skb_load_bytes_large_access(void)
{
	asm volatile ("					\
	r2 = 4;						\
	r6 = r10;					\
	r6 += -512;					\
	r3 = r6;					\
	r4 = 512;					\
	call %[bpf_skb_load_bytes];			\
	r0 = *(u64*)(r6 + 0);				\
	exit;						\
"	:
	: __imm(bpf_skb_load_bytes)
	: __clobber_all);
}

char _license[] SEC("license") = "GPL";