/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2017-2018 Covalent IO, Inc. http://covalent.io */
#include <stddef.h>
#include <string.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/pkt_cls.h>
#include <sys/socket.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>
#include "bpf_misc.h"

/* Sockmap sample program that connects a client and a backend together
 * using cgroups.
 *
 * client:X <---> frontend:80 client:X <---> backend:80
 *
 * For simplicity the values are hard-coded and bound 1:1. The hard-coded
 * values are part of the setup done by the sockmap.sh script associated
 * with this BPF program.
 */

/* TEST_MAP_TYPE and SOCKMAP are expected to be defined by the file that
 * compiles this source: TEST_MAP_TYPE selects the socket map type
 * (sockmap or sockhash) and SOCKMAP selects the matching *_map or *_hash
 * helper variants used below.
 */

/* Socket maps holding the established test sockets and redirect targets. */
struct {
	__uint(type, TEST_MAP_TYPE);
	__uint(max_entries, 20);
	__uint(key_size, sizeof(int));
	__uint(value_size, sizeof(int));
} sock_map SEC(".maps");

struct {
	__uint(type, TEST_MAP_TYPE);
	__uint(max_entries, 20);
	__uint(key_size, sizeof(int));
	__uint(value_size, sizeof(int));
} sock_map_txmsg SEC(".maps");

struct {
	__uint(type, TEST_MAP_TYPE);
	__uint(max_entries, 20);
	__uint(key_size, sizeof(int));
	__uint(value_size, sizeof(int));
} sock_map_redir SEC(".maps");

/* Byte count handed to bpf_msg_apply_bytes() by the sk_msg programs. */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, int);
} sock_apply_bytes SEC(".maps");

/* Byte count handed to bpf_msg_cork_bytes() by the sk_msg programs. */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, int);
} sock_cork_bytes SEC(".maps");

/* Offsets/lengths for the msg data helpers: slots 0-1 feed
 * bpf_msg_pull_data(), slots 2-3 feed bpf_msg_push_data() and slots 4-5
 * feed bpf_msg_pop_data().
 */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 6);
	__type(key, int);
	__type(value, int);
} sock_bytes SEC(".maps");

/* When non-zero, sk_msg2 redirects to key 2 using this value as flags. */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, int);
} sock_redir_flags SEC(".maps");

/* Options consulted by the sk_skb programs: slot 0 by sk_skb2, slot 1 by
 * sk_skb3 and slot 2 by sk_skb1.
 */
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 3);
	__type(key, int);
	__type(value, int);
} sock_skb_opts SEC(".maps");

/* Redirect target used by the sk_skb3 program. */
struct {
	__uint(type, TEST_MAP_TYPE);
	__uint(max_entries, 20);
	__uint(key_size, sizeof(int));
	__uint(value_size, sizeof(int));
} tls_sock_map SEC(".maps");
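/* Overview (descriptive summary of the programs below): the sockops
 * program inserts established TCP sockets into sock_map, the sk_skb
 * programs parse and redirect ingress skbs between those sockets, and
 * the sk_msg programs exercise the msg apply/cork/pull/push/pop helpers
 * on the sendmsg path, optionally redirecting into sock_map_redir. The
 * array maps above act as knobs; they are presumably written by the
 * userspace harness (the sockmap.sh setup mentioned in the header
 * comment) before traffic is generated.
 */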
/* sk_skb1: return the length stored in sock_skb_opts[2] when non-zero,
 * otherwise the full skb length.
 */
SEC("sk_skb1")
int bpf_prog1(struct __sk_buff *skb)
{
	int *f, two = 2;

	f = bpf_map_lookup_elem(&sock_skb_opts, &two);
	if (f && *f) {
		return *f;
	}
	return skb->len;
}

/* sk_skb2: ingress verdict. Redirect into sock_map at key 10 when the
 * local port is 10000, key 1 otherwise, or key 3 with flags taken from
 * sock_skb_opts[0] when that slot is set.
 */
SEC("sk_skb2")
int bpf_prog2(struct __sk_buff *skb)
{
	__u32 lport = skb->local_port;
	__u32 rport = skb->remote_port;
	int len, *f, ret, zero = 0;
	__u64 flags = 0;

	__sink(rport);
	if (lport == 10000)
		ret = 10;
	else
		ret = 1;

	len = (__u32)skb->data_end - (__u32)skb->data;
	__sink(len);

	f = bpf_map_lookup_elem(&sock_skb_opts, &zero);
	if (f && *f) {
		ret = 3;
		flags = *f;
	}

#ifdef SOCKMAP
	return bpf_sk_redirect_map(skb, &sock_map, ret, flags);
#else
	return bpf_sk_redirect_hash(skb, &sock_map, &ret, flags);
#endif
}

/* Pull 6 + offset bytes into the linear area and, if the bounds check
 * passes, overwrite four bytes at 'offset' with "PASS".
 */
static inline void bpf_write_pass(struct __sk_buff *skb, int offset)
{
	int err = bpf_skb_pull_data(skb, 6 + offset);
	void *data_end;
	char *c;

	if (err)
		return;

	c = (char *)(long)skb->data;
	data_end = (void *)(long)skb->data_end;

	if (c + 5 + offset < data_end)
		memcpy(c + offset, "PASS", 4);
}

/* sk_skb3: when sock_skb_opts[1] is set, shrink the skb room by 13 bytes,
 * grow it by 4, write "PASS" at offset 0 and redirect into tls_sock_map;
 * otherwise grow by 4 bytes and write "PASS" at offset 13.
 */
SEC("sk_skb3")
int bpf_prog3(struct __sk_buff *skb)
{
	int err, *f, ret = SK_PASS;
	const int one = 1;

	f = bpf_map_lookup_elem(&sock_skb_opts, &one);
	if (f && *f) {
		__u64 flags = 0;

		ret = 0;
		flags = *f;

		err = bpf_skb_adjust_room(skb, -13, 0, 0);
		if (err)
			return SK_DROP;
		err = bpf_skb_adjust_room(skb, 4, 0, 0);
		if (err)
			return SK_DROP;
		bpf_write_pass(skb, 0);
#ifdef SOCKMAP
		return bpf_sk_redirect_map(skb, &tls_sock_map, ret, flags);
#else
		return bpf_sk_redirect_hash(skb, &tls_sock_map, &ret, flags);
#endif
	}
	f = bpf_map_lookup_elem(&sock_skb_opts, &one);
	if (f && *f)
		ret = SK_DROP;
	err = bpf_skb_adjust_room(skb, 4, 0, 0);
	if (err)
		return SK_DROP;
	bpf_write_pass(skb, 13);
	return ret;
}

/* sockops: add established sockets to sock_map. A passively established
 * socket with local port 10000 is stored at key 1, an actively established
 * socket with remote port 10001 at key 10 (remote_port is in network byte
 * order, hence the bpf_ntohl()).
 */
SEC("sockops")
int bpf_sockmap(struct bpf_sock_ops *skops)
{
	__u32 lport, rport;
	int op, err, ret;

	op = (int) skops->op;

	switch (op) {
	case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
		lport = skops->local_port;
		rport = skops->remote_port;

		if (lport == 10000) {
			ret = 1;
#ifdef SOCKMAP
			err = bpf_sock_map_update(skops, &sock_map, &ret,
						  BPF_NOEXIST);
#else
			err = bpf_sock_hash_update(skops, &sock_map, &ret,
						   BPF_NOEXIST);
#endif
		}
		break;
	case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
		lport = skops->local_port;
		rport = skops->remote_port;

		if (bpf_ntohl(rport) == 10001) {
			ret = 10;
#ifdef SOCKMAP
			err = bpf_sock_map_update(skops, &sock_map, &ret,
						  BPF_NOEXIST);
#else
			err = bpf_sock_hash_update(skops, &sock_map, &ret,
						   BPF_NOEXIST);
#endif
		}
		break;
	default:
		break;
	}

	__sink(err);

	return 0;
}
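/* The sk_msg programs below exercise the msg data helpers:
 * bpf_msg_apply_bytes() limits how many bytes the verdict applies to,
 * bpf_msg_cork_bytes() asks the kernel to wait for that many bytes before
 * running the verdict again, bpf_msg_pull_data() makes a byte range
 * directly accessible, and bpf_msg_push_data()/bpf_msg_pop_data() insert
 * or remove bytes. The limits and ranges come from the array maps above;
 * a hypothetical harness-side update (apply_fd being a placeholder for
 * the sock_apply_bytes map fd) could look like:
 *
 *	int zero = 0, bytes = 4096;
 *	bpf_map_update_elem(apply_fd, &zero, &bytes, BPF_ANY);
 */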
/* sk_msg1: apply the configured apply/cork limits and pull/push/pop
 * ranges, then let the data through.
 */
SEC("sk_msg1")
int bpf_prog4(struct sk_msg_md *msg)
{
	int *bytes, zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5;
	int *start, *end, *start_push, *end_push, *start_pop, *pop, err = 0;

	bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
	if (bytes)
		bpf_msg_apply_bytes(msg, *bytes);
	bytes = bpf_map_lookup_elem(&sock_cork_bytes, &zero);
	if (bytes)
		bpf_msg_cork_bytes(msg, *bytes);
	start = bpf_map_lookup_elem(&sock_bytes, &zero);
	end = bpf_map_lookup_elem(&sock_bytes, &one);
	if (start && end)
		bpf_msg_pull_data(msg, *start, *end, 0);
	start_push = bpf_map_lookup_elem(&sock_bytes, &two);
	end_push = bpf_map_lookup_elem(&sock_bytes, &three);
	if (start_push && end_push) {
		err = bpf_msg_push_data(msg, *start_push, *end_push, 0);
		if (err)
			return SK_DROP;
	}
	start_pop = bpf_map_lookup_elem(&sock_bytes, &four);
	pop = bpf_map_lookup_elem(&sock_bytes, &five);
	if (start_pop && pop)
		bpf_msg_pop_data(msg, *start_pop, *pop, 0);
	return SK_PASS;
}

/* sk_msg2: same transforms as sk_msg1, then redirect into sock_map_redir
 * at key 0, or at key 2 with flags when sock_redir_flags[0] is set.
 */
SEC("sk_msg2")
int bpf_prog6(struct sk_msg_md *msg)
{
	int zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5, key = 0;
	int *bytes, *start, *end, *start_push, *end_push, *start_pop, *pop, *f;
	int err = 0;
	__u64 flags = 0;

	bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
	if (bytes)
		bpf_msg_apply_bytes(msg, *bytes);
	bytes = bpf_map_lookup_elem(&sock_cork_bytes, &zero);
	if (bytes)
		bpf_msg_cork_bytes(msg, *bytes);

	start = bpf_map_lookup_elem(&sock_bytes, &zero);
	end = bpf_map_lookup_elem(&sock_bytes, &one);
	if (start && end)
		bpf_msg_pull_data(msg, *start, *end, 0);

	start_push = bpf_map_lookup_elem(&sock_bytes, &two);
	end_push = bpf_map_lookup_elem(&sock_bytes, &three);
	if (start_push && end_push) {
		err = bpf_msg_push_data(msg, *start_push, *end_push, 0);
		if (err)
			return SK_DROP;
	}

	start_pop = bpf_map_lookup_elem(&sock_bytes, &four);
	pop = bpf_map_lookup_elem(&sock_bytes, &five);
	if (start_pop && pop)
		bpf_msg_pop_data(msg, *start_pop, *pop, 0);

	f = bpf_map_lookup_elem(&sock_redir_flags, &zero);
	if (f && *f) {
		key = 2;
		flags = *f;
	}
#ifdef SOCKMAP
	return bpf_msg_redirect_map(msg, &sock_map_redir, key, flags);
#else
	return bpf_msg_redirect_hash(msg, &sock_map_redir, &key, flags);
#endif
}

/* sk_msg3: drop unless an apply limit is configured and
 * bpf_msg_apply_bytes() succeeds.
 */
SEC("sk_msg3")
int bpf_prog8(struct sk_msg_md *msg)
{
	void *data_end = (void *)(long) msg->data_end;
	void *data = (void *)(long) msg->data;
	int ret = 0, *bytes, zero = 0;

	bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
	if (bytes) {
		ret = bpf_msg_apply_bytes(msg, *bytes);
		if (ret)
			return SK_DROP;
	} else {
		return SK_DROP;
	}

	__sink(data_end);
	__sink(data);

	return SK_PASS;
}

/* sk_msg4: pass immediately once the visible data covers the configured
 * cork size, otherwise keep corking.
 */
SEC("sk_msg4")
int bpf_prog9(struct sk_msg_md *msg)
{
	void *data_end = (void *)(long) msg->data_end;
	void *data = (void *)(long) msg->data;
	int ret = 0, *bytes, zero = 0;

	bytes = bpf_map_lookup_elem(&sock_cork_bytes, &zero);
	if (bytes) {
		if (((__u64)data_end - (__u64)data) >= *bytes)
			return SK_PASS;
		ret = bpf_msg_cork_bytes(msg, *bytes);
		if (ret)
			return SK_DROP;
	}
	return SK_PASS;
}

/* sk_msg5: apply the same transforms as sk_msg1 but drop the data. */
SEC("sk_msg5")
int bpf_prog10(struct sk_msg_md *msg)
{
	int *bytes, *start, *end, *start_push, *end_push, *start_pop, *pop;
	int zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5, err = 0;

	bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
	if (bytes)
		bpf_msg_apply_bytes(msg, *bytes);
	bytes = bpf_map_lookup_elem(&sock_cork_bytes, &zero);
	if (bytes)
		bpf_msg_cork_bytes(msg, *bytes);
	start = bpf_map_lookup_elem(&sock_bytes, &zero);
	end = bpf_map_lookup_elem(&sock_bytes, &one);
	if (start && end)
		bpf_msg_pull_data(msg, *start, *end, 0);
	start_push = bpf_map_lookup_elem(&sock_bytes, &two);
	end_push = bpf_map_lookup_elem(&sock_bytes, &three);
	if (start_push && end_push) {
		err = bpf_msg_push_data(msg, *start_push, *end_push, 0);
		if (err)
			return SK_PASS;
	}
	start_pop = bpf_map_lookup_elem(&sock_bytes, &four);
	pop = bpf_map_lookup_elem(&sock_bytes, &five);
	if (start_pop && pop)
		bpf_msg_pop_data(msg, *start_pop, *pop, 0);
	return SK_DROP;
}

char _license[] SEC("license") = "GPL";