// SPDX-License-Identifier: GPL-2.0
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>

/* Errno values are redefined locally because BPF programs built against
 * vmlinux.h cannot include <errno.h>; the numbers match standard Linux
 * errno values.
 */
#define EAFNOSUPPORT 97
#define EPROTO 71
#define ENONET 64
#define EINVAL 22
#define ENOENT 2

extern unsigned long CONFIG_HZ __kconfig;

/* Result slots read back by the userspace test harness (plain globals in
 * a BPF object are visible to the loader).  The test_e* slots start at 0
 * and receive the errno reported through bpf_ct_opts___local.error when
 * the corresponding negative test fires.  The "success" slots start
 * pre-seeded with a failure code and are overwritten with 0 only when
 * that step succeeds.
 */
int test_einval_bpf_tuple = 0;
int test_einval_reserved = 0;
int test_einval_netns_id = 0;
int test_einval_len_opts = 0;
int test_eproto_l4proto = 0;
int test_enonet_netns_id = 0;
int test_enoent_lookup = 0;
int test_eafnosupport = 0;
int test_alloc_entry = -EINVAL;
int test_insert_entry = -EAFNOSUPPORT;
int test_succ_lookup = -ENOENT;
u32 test_delta_timeout = 0;
u32 test_status = 0;
/* 4-tuple of an existing conntrack entry; presumably filled in by the
 * userspace harness before the program runs — TODO confirm against the
 * loader side of this test.
 */
__be32 saddr = 0;
__be16 sport = 0;
__be32 daddr = 0;
__be16 dport = 0;
int test_exist_lookup = -ENOENT;
u32 test_exist_lookup_mark = 0;

struct nf_conn;

/* Local mirror of the kernel's struct bpf_ct_opts.  preserve_access_index
 * makes field accesses CO-RE-relocatable, so this stays in sync with the
 * kernel's layout at load time.
 */
struct bpf_ct_opts___local {
	s32 netns_id;
	s32 error;	/* out-param: errno reported by the kfunc on failure */
	u8 l4proto;
	u8 reserved[3];	/* must be zero; used below to provoke -EINVAL */
} __attribute__((preserve_access_index));

/* Conntrack kfuncs exported by the kernel (nf_conntrack BPF interface). */
struct nf_conn *bpf_xdp_ct_alloc(struct xdp_md *, struct bpf_sock_tuple *, u32,
				 struct bpf_ct_opts___local *, u32) __ksym;
struct nf_conn *bpf_xdp_ct_lookup(struct xdp_md *, struct bpf_sock_tuple *, u32,
				  struct bpf_ct_opts___local *, u32) __ksym;
struct nf_conn *bpf_skb_ct_alloc(struct __sk_buff *, struct bpf_sock_tuple *, u32,
				 struct bpf_ct_opts___local *, u32) __ksym;
struct nf_conn *bpf_skb_ct_lookup(struct __sk_buff *, struct bpf_sock_tuple *, u32,
				  struct bpf_ct_opts___local *, u32) __ksym;
struct nf_conn *bpf_ct_insert_entry(struct nf_conn *) __ksym;
void bpf_ct_release(struct nf_conn *) __ksym;
void bpf_ct_set_timeout(struct nf_conn *, u32) __ksym;
int bpf_ct_change_timeout(struct nf_conn *, u32) __ksym;
int bpf_ct_set_status(struct nf_conn *, u32) __ksym;
int bpf_ct_change_status(struct nf_conn *, u32) __ksym;

/* Exercise the conntrack kfuncs through the given lookup/alloc entry
 * points so the same test body covers both the XDP and the TC flavour.
 *
 * lookup_fn: bpf_xdp_ct_lookup or bpf_skb_ct_lookup
 * alloc_fn:  bpf_xdp_ct_alloc or bpf_skb_ct_alloc
 * ctx:       the matching program context (struct xdp_md * / __sk_buff *)
 *
 * Every negative case records opts_def.error into its dedicated global;
 * each case also restores any opts_def field it perturbed so the cases
 * stay independent.  Any unexpectedly-successful lookup is released to
 * avoid leaking a conntrack reference (the kfuncs return acquired
 * pointers).
 */
static __always_inline void
nf_ct_test(struct nf_conn *(*lookup_fn)(void *, struct bpf_sock_tuple *, u32,
					struct bpf_ct_opts___local *, u32),
	   struct nf_conn *(*alloc_fn)(void *, struct bpf_sock_tuple *, u32,
				       struct bpf_ct_opts___local *, u32),
	   void *ctx)
{
	struct bpf_ct_opts___local opts_def = { .l4proto = IPPROTO_TCP, .netns_id = -1 };
	struct bpf_sock_tuple bpf_tuple;
	struct nf_conn *ct;
	int err;

	/* Only the ipv4 member of the tuple union is used below, so only
	 * that portion is zeroed.
	 */
	__builtin_memset(&bpf_tuple, 0, sizeof(bpf_tuple.ipv4));

	/* NULL tuple pointer -> expected to fail (test_einval_bpf_tuple). */
	ct = lookup_fn(ctx, NULL, 0, &opts_def, sizeof(opts_def));
	if (ct)
		bpf_ct_release(ct);
	else
		test_einval_bpf_tuple = opts_def.error;

	/* Non-zero reserved byte in opts -> expected to fail. */
	opts_def.reserved[0] = 1;
	ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
		       sizeof(opts_def));
	opts_def.reserved[0] = 0;
	opts_def.l4proto = IPPROTO_TCP;
	if (ct)
		bpf_ct_release(ct);
	else
		test_einval_reserved = opts_def.error;

	/* netns_id below -1 (invalid sentinel) -> expected to fail. */
	opts_def.netns_id = -2;
	ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
		       sizeof(opts_def));
	opts_def.netns_id = -1;
	if (ct)
		bpf_ct_release(ct);
	else
		test_einval_netns_id = opts_def.error;

	/* Truncated opts length -> expected to fail. */
	ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
		       sizeof(opts_def) - 1);
	if (ct)
		bpf_ct_release(ct);
	else
		test_einval_len_opts = opts_def.error;

	/* Unsupported l4proto (ICMP) -> expected to fail (test_eproto_*). */
	opts_def.l4proto = IPPROTO_ICMP;
	ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
		       sizeof(opts_def));
	opts_def.l4proto = IPPROTO_TCP;
	if (ct)
		bpf_ct_release(ct);
	else
		test_eproto_l4proto = opts_def.error;

	/* Nonexistent netns id -> expected to fail (test_enonet_*). */
	opts_def.netns_id = 0xf00f;
	ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
		       sizeof(opts_def));
	opts_def.netns_id = -1;
	if (ct)
		bpf_ct_release(ct);
	else
		test_enonet_netns_id = opts_def.error;

	/* Valid arguments but all-zero tuple: no such entry exists, so the
	 * lookup should miss (test_enoent_lookup).
	 */
	ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
		       sizeof(opts_def));
	if (ct)
		bpf_ct_release(ct);
	else
		test_enoent_lookup = opts_def.error;

	/* Tuple length matching neither IPv4 nor IPv6 -> expected to fail
	 * (test_eafnosupport).
	 */
	ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4) - 1, &opts_def,
		       sizeof(opts_def));
	if (ct)
		bpf_ct_release(ct);
	else
		test_eafnosupport = opts_def.error;

	/* Success path: allocate a fresh entry for a random 4-tuple, insert
	 * it, and look it back up.
	 */
	bpf_tuple.ipv4.saddr = bpf_get_prandom_u32(); /* src IP */
	bpf_tuple.ipv4.daddr = bpf_get_prandom_u32(); /* dst IP */
	bpf_tuple.ipv4.sport = bpf_get_prandom_u32(); /* src port */
	bpf_tuple.ipv4.dport = bpf_get_prandom_u32(); /* dst port */

	ct = alloc_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
		      sizeof(opts_def));
	if (ct) {
		struct nf_conn *ct_ins;

		/* Timeout/status must be set on the not-yet-inserted entry;
		 * the *_change_* variants are for already-inserted ones.
		 */
		bpf_ct_set_timeout(ct, 10000);
		bpf_ct_set_status(ct, IPS_CONFIRMED);

		/* insert_entry consumes the reference to ct and returns a
		 * new acquired pointer, so only ct_ins is used afterwards.
		 */
		ct_ins = bpf_ct_insert_entry(ct);
		if (ct_ins) {
			struct nf_conn *ct_lk;

			ct_lk = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4),
					  &opts_def, sizeof(opts_def));
			if (ct_lk) {
				/* update ct entry timeout */
				bpf_ct_change_timeout(ct_lk, 10000);
				/* Convert the raw jiffies delta to seconds so
				 * userspace can compare against the 10000 ms
				 * timeout just set.
				 */
				test_delta_timeout = ct_lk->timeout - bpf_jiffies64();
				test_delta_timeout /= CONFIG_HZ;
				test_status = IPS_SEEN_REPLY;
				bpf_ct_change_status(ct_lk, IPS_SEEN_REPLY);
				bpf_ct_release(ct_lk);
				test_succ_lookup = 0;
			}
			bpf_ct_release(ct_ins);
			test_insert_entry = 0;
		}
		test_alloc_entry = 0;
	}

	/* Look up the pre-existing entry whose 4-tuple the harness placed
	 * in the saddr/daddr/sport/dport globals, and report its mark.
	 */
	bpf_tuple.ipv4.saddr = saddr;
	bpf_tuple.ipv4.daddr = daddr;
	bpf_tuple.ipv4.sport = sport;
	bpf_tuple.ipv4.dport = dport;
	ct = lookup_fn(ctx, &bpf_tuple, sizeof(bpf_tuple.ipv4), &opts_def,
		       sizeof(opts_def));
	if (ct) {
		test_exist_lookup = 0;
		if (ct->mark == 42)
			test_exist_lookup_mark = 43;
		bpf_ct_release(ct);
	} else {
		test_exist_lookup = opts_def.error;
	}
}

/* XDP entry point: runs the shared test body with the XDP kfunc flavour. */
SEC("xdp")
int nf_xdp_ct_test(struct xdp_md *ctx)
{
	nf_ct_test((void *)bpf_xdp_ct_lookup, (void *)bpf_xdp_ct_alloc, ctx);
	return 0;
}

/* TC entry point: runs the shared test body with the skb kfunc flavour. */
SEC("tc")
int nf_skb_ct_test(struct __sk_buff *ctx)
{
	nf_ct_test((void *)bpf_skb_ct_lookup, (void *)bpf_skb_ct_alloc, ctx);
	return 0;
}

char _license[] SEC("license") = "GPL";