// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
 */
#include <stddef.h>
#include <string.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/tcp.h>
#include <linux/pkt_cls.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

#define barrier() __asm__ __volatile__("": : :"memory")

/* llvm will optimize both subprograms into exactly the same BPF assembly
 *
 * Disassembly of section .text:
 *
 * 0000000000000000 test_pkt_access_subprog1:
 * ;	return skb->len * 2;
 *        0:	61 10 00 00 00 00 00 00	r0 = *(u32 *)(r1 + 0)
 *        1:	64 00 00 00 01 00 00 00	w0 <<= 1
 *        2:	95 00 00 00 00 00 00 00	exit
 *
 * 0000000000000018 test_pkt_access_subprog2:
 * ;	return skb->len * val;
 *        3:	61 10 00 00 00 00 00 00	r0 = *(u32 *)(r1 + 0)
 *        4:	64 00 00 00 01 00 00 00	w0 <<= 1
 *        5:	95 00 00 00 00 00 00 00	exit
 *
 * Which makes it an interesting test for BTF-enabled verifier.
 */
static __attribute__ ((noinline))
int test_pkt_access_subprog1(volatile struct __sk_buff *skb)
{
	return skb->len * 2;
}

static __attribute__ ((noinline))
int test_pkt_access_subprog2(int val, volatile struct __sk_buff *skb)
{
	return skb->len * val;
}

#define MAX_STACK (512 - 2 * 32)
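/*
 * The volatile MAX_STACK-sized buffers in get_skb_len() and get_skb_ifindex()
 * below presumably exist only to make those subprogs consume almost the whole
 * BPF stack: one 448-byte frame plus two caller frames of roughly 32 bytes
 * each stays just within the verifier's 512-byte limit on the combined stack
 * of a call chain, exercising its per-subprogram stack-depth tracking.
 */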
__attribute__ ((noinline))
int get_skb_len(struct __sk_buff *skb)
{
	volatile char buf[MAX_STACK] = {};

	return skb->len;
}

__attribute__ ((noinline))
int get_constant(long val)
{
	return val - 122;
}

int get_skb_ifindex(int, struct __sk_buff *skb, int);

__attribute__ ((noinline))
int test_pkt_access_subprog3(int val, struct __sk_buff *skb)
{
	return get_skb_len(skb) * get_skb_ifindex(val, skb, get_constant(123));
}

__attribute__ ((noinline))
int get_skb_ifindex(int val, struct __sk_buff *skb, int var)
{
	volatile char buf[MAX_STACK] = {};

	return skb->ifindex * val * var;
}

__attribute__ ((noinline))
int test_pkt_write_access_subprog(struct __sk_buff *skb, __u32 off)
{
	void *data = (void *)(long)skb->data;
	void *data_end = (void *)(long)skb->data_end;
	struct tcphdr *tcp = NULL;

	if (off > sizeof(struct ethhdr) + sizeof(struct ipv6hdr))
		return -1;

	tcp = data + off;
	if (tcp + 1 > data_end)
		return -1;
	/* make modification to the packet data */
	tcp->check++;
	return 0;
}

SEC("tc")
int test_pkt_access(struct __sk_buff *skb)
{
	void *data_end = (void *)(long)skb->data_end;
	void *data = (void *)(long)skb->data;
	struct ethhdr *eth = (struct ethhdr *)(data);
	struct tcphdr *tcp = NULL;
	__u8 proto = 255;
	__u64 ihl_len;

	if (eth + 1 > data_end)
		return TC_ACT_SHOT;

	if (eth->h_proto == bpf_htons(ETH_P_IP)) {
		struct iphdr *iph = (struct iphdr *)(eth + 1);

		if (iph + 1 > data_end)
			return TC_ACT_SHOT;
		ihl_len = iph->ihl * 4;
		proto = iph->protocol;
		tcp = (struct tcphdr *)((void *)(iph) + ihl_len);
	} else if (eth->h_proto == bpf_htons(ETH_P_IPV6)) {
		struct ipv6hdr *ip6h = (struct ipv6hdr *)(eth + 1);

		if (ip6h + 1 > data_end)
			return TC_ACT_SHOT;
		ihl_len = sizeof(*ip6h);
		proto = ip6h->nexthdr;
		tcp = (struct tcphdr *)((void *)(ip6h) + ihl_len);
	}

	if (test_pkt_access_subprog1(skb) != skb->len * 2)
		return TC_ACT_SHOT;
	if (test_pkt_access_subprog2(2, skb) != skb->len * 2)
		return TC_ACT_SHOT;
	if (test_pkt_access_subprog3(3, skb) != skb->len * 3 * skb->ifindex)
		return TC_ACT_SHOT;
	if (tcp) {
		if (test_pkt_write_access_subprog(skb, (void *)tcp - data))
			return TC_ACT_SHOT;
		if (((void *)(tcp) + 20) > data_end || proto != 6)
			return TC_ACT_SHOT;
		barrier(); /* to force ordering of checks */
		if (((void *)(tcp) + 18) > data_end)
			return TC_ACT_SHOT;
		if (tcp->urg_ptr == 123)
			return TC_ACT_OK;
	}

	return TC_ACT_UNSPEC;
}
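/*
 * A minimal user-space sketch of how a SEC("tc") object like this one can be
 * exercised via BPF_PROG_TEST_RUN without attaching it to a real qdisc. It is
 * an illustration only and would live in a separate, ordinary C file linked
 * against libbpf; the object path "test_pkt_access.bpf.o" and the zero-filled
 * placeholder packet are assumptions, and the real selftest harness builds a
 * proper Ethernet/IP/TCP frame instead.
 */
#include <stdio.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

int main(void)
{
	char pkt[64] = {};	/* placeholder frame; the real test crafts Eth/IPv4/TCP */
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = pkt,
		.data_size_in = sizeof(pkt),
		.repeat = 1,
	);
	struct bpf_program *prog;
	struct bpf_object *obj;
	int err = 1;

	obj = bpf_object__open_file("test_pkt_access.bpf.o", NULL); /* assumed object name */
	if (!obj)
		return 1;

	if (bpf_object__load(obj))
		goto out;

	prog = bpf_object__find_program_by_name(obj, "test_pkt_access");
	if (!prog)
		goto out;

	/* Single BPF_PROG_TEST_RUN invocation; retval is the TC_ACT_* code
	 * returned by test_pkt_access() for this packet.
	 */
	err = bpf_prog_test_run_opts(bpf_program__fd(prog), &topts);
	printf("err=%d retval=%u\n", err, topts.retval);
out:
	bpf_object__close(obj);
	return err ? 1 : 0;
}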