/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2017-2018 Covalent IO, Inc. http://covalent.io */
#include <stddef.h>
#include <string.h>
#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/pkt_cls.h>
#include <sys/socket.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

/* Sockmap sample program that connects a client and a backend together
 * using cgroups.
 *
 *    client:X <---> frontend:80 client:X <---> backend:80
 *
 * For simplicity we hard code values here and bind 1:1. The hard coded
 * values are part of the setup done by the sockmap.sh script associated
 * with this BPF program.
 *
 * bpf_printk tracing can be added to report connections being established
 * and verdicts being decided, but no prints are compiled in here.
 */

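/* TEST_MAP_TYPE is not defined here; the including object is expected to
 * define it (presumably BPF_MAP_TYPE_SOCKMAP together with SOCKMAP, or
 * BPF_MAP_TYPE_SOCKHASH without it) before pulling this file in, which
 * also selects between the *_map and *_hash update/redirect helpers used
 * below.
 */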
struct {
	__uint(type, TEST_MAP_TYPE);
	__uint(max_entries, 20);
	__uint(key_size, sizeof(int));
	__uint(value_size, sizeof(int));
} sock_map SEC(".maps");

struct {
	__uint(type, TEST_MAP_TYPE);
	__uint(max_entries, 20);
	__uint(key_size, sizeof(int));
	__uint(value_size, sizeof(int));
} sock_map_txmsg SEC(".maps");

struct {
	__uint(type, TEST_MAP_TYPE);
	__uint(max_entries, 20);
	__uint(key_size, sizeof(int));
	__uint(value_size, sizeof(int));
} sock_map_redir SEC(".maps");

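/* The array maps below are control knobs that user space (typically the
 * test_sockmap utility shipped alongside this program) writes before
 * generating traffic, e.g. roughly:
 *
 *	int key = 0, bytes = 1024;
 *	bpf_map_update_elem(map_fd, &key, &bytes, BPF_ANY);
 *
 * sock_apply_bytes and sock_cork_bytes feed bpf_msg_apply_bytes() and
 * bpf_msg_cork_bytes(), sock_bytes holds the offset arguments for the
 * pull/push/pop helpers, and sock_redir_flags / sock_skb_opts select
 * redirect keys, flags and drop behaviour in the programs below.
 */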
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, int);
} sock_apply_bytes SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, int);
} sock_cork_bytes SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 6);
	__type(key, int);
	__type(value, int);
} sock_bytes SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, int);
} sock_redir_flags SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 3);
	__type(key, int);
	__type(value, int);
} sock_skb_opts SEC(".maps");

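/* Extra sock map targeted by the sk_skb3 program; user space is expected
 * to populate it with the sockets that the "PASS"-tagged skbs should be
 * redirected to.
 */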
struct {
	__uint(type, TEST_MAP_TYPE);
	__uint(max_entries, 20);
	__uint(key_size, sizeof(int));
	__uint(value_size, sizeof(int));
} tls_sock_map SEC(".maps");

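/* sk_skb parser: return the full skb length unless slot 2 of
 * sock_skb_opts supplies a different return value.
 */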
SEC("sk_skb1")
int bpf_prog1(struct __sk_buff *skb)
{
	int *f, two = 2;

	f = bpf_map_lookup_elem(&sock_skb_opts, &two);
	if (f && *f) {
		return *f;
	}
	return skb->len;
}

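/* sk_skb verdict: redirect into sock_map using key 10 when the local
 * port is 10000 and key 1 otherwise; slot 0 of sock_skb_opts can override
 * both the key (3) and the redirect flags.
 */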
SEC("sk_skb2")
int bpf_prog2(struct __sk_buff *skb)
{
	__u32 lport = skb->local_port;
	__u32 rport = skb->remote_port;
	int len, *f, ret, zero = 0;
	__u64 flags = 0;

	if (lport == 10000)
		ret = 10;
	else
		ret = 1;

	len = (__u32)skb->data_end - (__u32)skb->data;
	f = bpf_map_lookup_elem(&sock_skb_opts, &zero);
	if (f && *f) {
		ret = 3;
		flags = *f;
	}

#ifdef SOCKMAP
	return bpf_sk_redirect_map(skb, &sock_map, ret, flags);
#else
	return bpf_sk_redirect_hash(skb, &sock_map, &ret, flags);
#endif
}

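/* Linearize the first 6 + offset bytes and, if the bounds check holds,
 * overwrite 4 bytes at @offset with the "PASS" marker.
 */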
static inline void bpf_write_pass(struct __sk_buff *skb, int offset)
{
	int err = bpf_skb_pull_data(skb, 6 + offset);
	void *data_end;
	char *c;

	if (err)
		return;

	c = (char *)(long)skb->data;
	data_end = (void *)(long)skb->data_end;

	if (c + 5 + offset < data_end)
		memcpy(c + offset, "PASS", 4);
}

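/* sk_skb verdict targeting tls_sock_map: when slot 1 of sock_skb_opts is
 * set, shrink and regrow the skb, tag it with "PASS" at offset 0 and
 * redirect into tls_sock_map; otherwise grow the skb by 4 bytes, write
 * the marker at offset 13 and return the verdict directly.
 */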
SEC("sk_skb3")
int bpf_prog3(struct __sk_buff *skb)
{
	int err, *f, ret = SK_PASS;
	const int one = 1;

	f = bpf_map_lookup_elem(&sock_skb_opts, &one);
	if (f && *f) {
		__u64 flags = 0;

		ret = 0;
		flags = *f;

		err = bpf_skb_adjust_room(skb, -13, 0, 0);
		if (err)
			return SK_DROP;
		err = bpf_skb_adjust_room(skb, 4, 0, 0);
		if (err)
			return SK_DROP;
		bpf_write_pass(skb, 0);
#ifdef SOCKMAP
		return bpf_sk_redirect_map(skb, &tls_sock_map, ret, flags);
#else
		return bpf_sk_redirect_hash(skb, &tls_sock_map, &ret, flags);
#endif
	}
	f = bpf_map_lookup_elem(&sock_skb_opts, &one);
	if (f && *f)
		ret = SK_DROP;
	err = bpf_skb_adjust_room(skb, 4, 0, 0);
	if (err)
		return SK_DROP;
	bpf_write_pass(skb, 13);
	return ret;
}

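/* sockops program that fills sock_map as TCP connections come up:
 * passive sockets on local port 10000 are inserted at key 1, active
 * sockets connecting to remote port 10001 at key 10.
 */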
SEC("sockops")
int bpf_sockmap(struct bpf_sock_ops *skops)
{
	__u32 lport, rport;
	int op, err = 0, index, key, ret;

	op = (int) skops->op;

	switch (op) {
	case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB:
		lport = skops->local_port;
		rport = skops->remote_port;

		if (lport == 10000) {
			ret = 1;
#ifdef SOCKMAP
			err = bpf_sock_map_update(skops, &sock_map, &ret,
						  BPF_NOEXIST);
#else
			err = bpf_sock_hash_update(skops, &sock_map, &ret,
						   BPF_NOEXIST);
#endif
		}
		break;
	case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB:
		lport = skops->local_port;
		rport = skops->remote_port;

		if (bpf_ntohl(rport) == 10001) {
			ret = 10;
#ifdef SOCKMAP
			err = bpf_sock_map_update(skops, &sock_map, &ret,
						  BPF_NOEXIST);
#else
			err = bpf_sock_hash_update(skops, &sock_map, &ret,
						   BPF_NOEXIST);
#endif
		}
		break;
	default:
		break;
	}

	return 0;
}

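/* sk_msg pass-through that exercises the byte-accounting helpers; the
 * apply, cork, pull, push and pop arguments all come from the control
 * arrays above.
 */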
SEC("sk_msg1")
int bpf_prog4(struct sk_msg_md *msg)
{
	int *bytes, zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5;
	int *start, *end, *start_push, *end_push, *start_pop, *pop;

	bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
	if (bytes)
		bpf_msg_apply_bytes(msg, *bytes);
	bytes = bpf_map_lookup_elem(&sock_cork_bytes, &zero);
	if (bytes)
		bpf_msg_cork_bytes(msg, *bytes);
	start = bpf_map_lookup_elem(&sock_bytes, &zero);
	end = bpf_map_lookup_elem(&sock_bytes, &one);
	if (start && end)
		bpf_msg_pull_data(msg, *start, *end, 0);
	start_push = bpf_map_lookup_elem(&sock_bytes, &two);
	end_push = bpf_map_lookup_elem(&sock_bytes, &three);
	if (start_push && end_push)
		bpf_msg_push_data(msg, *start_push, *end_push, 0);
	start_pop = bpf_map_lookup_elem(&sock_bytes, &four);
	pop = bpf_map_lookup_elem(&sock_bytes, &five);
	if (start_pop && pop)
		bpf_msg_pop_data(msg, *start_pop, *pop, 0);
	return SK_PASS;
}

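/* Same helper sequence as bpf_prog4, but the final verdict is a redirect
 * into sock_map_redir; slot 0 of sock_redir_flags can switch the key
 * from 0 to 2 and set the redirect flags.
 */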
SEC("sk_msg2")
int bpf_prog6(struct sk_msg_md *msg)
{
	int zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5, key = 0;
	int *bytes, *start, *end, *start_push, *end_push, *start_pop, *pop, *f;
	__u64 flags = 0;

	bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
	if (bytes)
		bpf_msg_apply_bytes(msg, *bytes);
	bytes = bpf_map_lookup_elem(&sock_cork_bytes, &zero);
	if (bytes)
		bpf_msg_cork_bytes(msg, *bytes);

	start = bpf_map_lookup_elem(&sock_bytes, &zero);
	end = bpf_map_lookup_elem(&sock_bytes, &one);
	if (start && end)
		bpf_msg_pull_data(msg, *start, *end, 0);

	start_push = bpf_map_lookup_elem(&sock_bytes, &two);
	end_push = bpf_map_lookup_elem(&sock_bytes, &three);
	if (start_push && end_push)
		bpf_msg_push_data(msg, *start_push, *end_push, 0);

	start_pop = bpf_map_lookup_elem(&sock_bytes, &four);
	pop = bpf_map_lookup_elem(&sock_bytes, &five);
	if (start_pop && pop)
		bpf_msg_pop_data(msg, *start_pop, *pop, 0);

	f = bpf_map_lookup_elem(&sock_redir_flags, &zero);
	if (f && *f) {
		key = 2;
		flags = *f;
	}
#ifdef SOCKMAP
	return bpf_msg_redirect_map(msg, &sock_map_redir, key, flags);
#else
	return bpf_msg_redirect_hash(msg, &sock_map_redir, &key, flags);
#endif
}

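/* Drop the message unless an apply-bytes value is configured and
 * bpf_msg_apply_bytes() accepts it.
 */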
SEC("sk_msg3")
int bpf_prog8(struct sk_msg_md *msg)
{
	void *data_end = (void *)(long) msg->data_end;
	void *data = (void *)(long) msg->data;
	int ret = 0, *bytes, zero = 0;

	bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
	if (bytes) {
		ret = bpf_msg_apply_bytes(msg, *bytes);
		if (ret)
			return SK_DROP;
	} else {
		return SK_DROP;
	}
	return SK_PASS;
}

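/* Cork until at least sock_cork_bytes[0] bytes are queued; once the
 * message already spans that many bytes, let it pass.
 */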
SEC("sk_msg4")
int bpf_prog9(struct sk_msg_md *msg)
{
	void *data_end = (void *)(long) msg->data_end;
	void *data = (void *)(long) msg->data;
	int ret = 0, *bytes, zero = 0;

	bytes = bpf_map_lookup_elem(&sock_cork_bytes, &zero);
	if (bytes) {
		if (((__u64)data_end - (__u64)data) >= *bytes)
			return SK_PASS;
		ret = bpf_msg_cork_bytes(msg, *bytes);
		if (ret)
			return SK_DROP;
	}
	return SK_PASS;
}

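/* Same helper walk as bpf_prog4 but with an unconditional SK_DROP
 * verdict, presumably to exercise the helpers on the drop path.
 */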
SEC("sk_msg5")
int bpf_prog10(struct sk_msg_md *msg)
{
	int *bytes, *start, *end, *start_push, *end_push, *start_pop, *pop;
	int zero = 0, one = 1, two = 2, three = 3, four = 4, five = 5;

	bytes = bpf_map_lookup_elem(&sock_apply_bytes, &zero);
	if (bytes)
		bpf_msg_apply_bytes(msg, *bytes);
	bytes = bpf_map_lookup_elem(&sock_cork_bytes, &zero);
	if (bytes)
		bpf_msg_cork_bytes(msg, *bytes);
	start = bpf_map_lookup_elem(&sock_bytes, &zero);
	end = bpf_map_lookup_elem(&sock_bytes, &one);
	if (start && end)
		bpf_msg_pull_data(msg, *start, *end, 0);
	start_push = bpf_map_lookup_elem(&sock_bytes, &two);
	end_push = bpf_map_lookup_elem(&sock_bytes, &three);
	if (start_push && end_push)
		bpf_msg_push_data(msg, *start_push, *end_push, 0);
	start_pop = bpf_map_lookup_elem(&sock_bytes, &four);
	pop = bpf_map_lookup_elem(&sock_bytes, &five);
	if (start_pop && pop)
		bpf_msg_pop_data(msg, *start_pop, *pop, 0);
	return SK_DROP;
}

char _license[] SEC("license") = "GPL";