// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
// Copyright (c) 2020 Cloudflare

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <linux/bpf.h>
#include <linux/in.h>
#include <sys/socket.h>

#include <bpf/bpf_endian.h>
#include <bpf/bpf_helpers.h>

#define IP4(a, b, c, d)					\
	bpf_htonl((((__u32)(a) & 0xffU) << 24) |	\
		  (((__u32)(b) & 0xffU) << 16) |	\
		  (((__u32)(c) & 0xffU) <<  8) |	\
		  (((__u32)(d) & 0xffU) <<  0))
#define IP6(aaaa, bbbb, cccc, dddd)			\
	{ bpf_htonl(aaaa), bpf_htonl(bbbb), bpf_htonl(cccc), bpf_htonl(dddd) }
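/* For example, IP4(127, 0, 0, 1) expands to bpf_htonl(0x7f000001), that is
 * 127.0.0.1 in network byte order, and IP6(0xfd000000, 0, 0, 1) produces the
 * four 32-bit words of fd00::1.
 */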

#define MAX_SOCKS 32

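/* Sockets to redirect to. The user-space part of the test is expected to
 * install the listening SERVER_A and SERVER_B sockets here.
 */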
struct {
	__uint(type, BPF_MAP_TYPE_SOCKMAP);
	__uint(max_entries, MAX_SOCKS);
	__type(key, __u32);
	__type(value, __u64);
} redir_map SEC(".maps");

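/* Per-program "has run" flags, keyed by PROG1/PROG2. The multi_prog_*
 * programs below set their slot to PROG_DONE, presumably so the user-space
 * test can verify which programs in a chain were executed.
 */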
struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 2);
	__type(key, int);
	__type(value, int);
} run_map SEC(".maps");
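/* A hypothetical user-space check of run_map could look roughly like this
 * (sketch only, not part of this program; the skeleton names are assumed):
 *
 *	int key = PROG1, done = 0;
 *
 *	bpf_map_lookup_elem(bpf_map__fd(skel->maps.run_map), &key, &done);
 *	// expect done == PROG_DONE once the first program has run
 */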

enum {
	PROG1 = 0,
	PROG2,
};

enum {
	SERVER_A = 0,
	SERVER_B,
};

/* Addressable key/value constants for convenience */
static const int KEY_PROG1 = PROG1;
static const int KEY_PROG2 = PROG2;
static const int PROG_DONE = 1;

static const __u32 KEY_SERVER_A = SERVER_A;
static const __u32 KEY_SERVER_B = SERVER_B;

static const __u16 DST_PORT = 7007; /* Host byte order */
static const __u32 DST_IP4 = IP4(127, 0, 0, 1);
static const __u32 DST_IP6[] = IP6(0xfd000000, 0x0, 0x0, 0x00000001);

SEC("sk_lookup/lookup_pass")
int lookup_pass(struct bpf_sk_lookup *ctx)
{
	return SK_PASS;
}

SEC("sk_lookup/lookup_drop")
int lookup_drop(struct bpf_sk_lookup *ctx)
{
	return SK_DROP;
}

SEC("sk_reuseport/reuse_pass")
int reuseport_pass(struct sk_reuseport_md *ctx)
{
	return SK_PASS;
}

SEC("sk_reuseport/reuse_drop")
int reuseport_drop(struct sk_reuseport_md *ctx)
{
	return SK_DROP;
}

/* Redirect packets destined for port DST_PORT to socket at redir_map[0]. */
SEC("sk_lookup/redir_port")
int redir_port(struct bpf_sk_lookup *ctx)
{
	struct bpf_sock *sk;
	int err;

	if (ctx->local_port != DST_PORT)
		return SK_PASS;

	sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A);
	if (!sk)
		return SK_PASS;

	err = bpf_sk_assign(ctx, sk, 0);
	bpf_sk_release(sk);
	return err ? SK_DROP : SK_PASS;
}

/* Redirect packets destined for DST_IP4 address to socket at redir_map[0]. */
SEC("sk_lookup/redir_ip4")
int redir_ip4(struct bpf_sk_lookup *ctx)
{
	struct bpf_sock *sk;
	int err;

	if (ctx->family != AF_INET)
		return SK_PASS;
	if (ctx->local_port != DST_PORT)
		return SK_PASS;
	if (ctx->local_ip4 != DST_IP4)
		return SK_PASS;

	sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A);
	if (!sk)
		return SK_PASS;

	err = bpf_sk_assign(ctx, sk, 0);
	bpf_sk_release(sk);
	return err ? SK_DROP : SK_PASS;
}

/* Redirect packets destined for DST_IP6 address to socket at redir_map[0]. */
SEC("sk_lookup/redir_ip6")
int redir_ip6(struct bpf_sk_lookup *ctx)
{
	struct bpf_sock *sk;
	int err;

	if (ctx->family != AF_INET6)
		return SK_PASS;
	if (ctx->local_port != DST_PORT)
		return SK_PASS;
	if (ctx->local_ip6[0] != DST_IP6[0] ||
	    ctx->local_ip6[1] != DST_IP6[1] ||
	    ctx->local_ip6[2] != DST_IP6[2] ||
	    ctx->local_ip6[3] != DST_IP6[3])
		return SK_PASS;

	sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A);
	if (!sk)
		return SK_PASS;

	err = bpf_sk_assign(ctx, sk, 0);
	bpf_sk_release(sk);
	return err ? SK_DROP : SK_PASS;
}

SEC("sk_lookup/select_sock_a")
int select_sock_a(struct bpf_sk_lookup *ctx)
{
	struct bpf_sock *sk;
	int err;

	sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A);
	if (!sk)
		return SK_PASS;

	err = bpf_sk_assign(ctx, sk, 0);
	bpf_sk_release(sk);
	return err ? SK_DROP : SK_PASS;
}

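/* Select SERVER_A, passing BPF_SK_LOOKUP_F_NO_REUSEPORT so that socket
 * selection within a reuseport group is skipped and the looked-up socket
 * itself is assigned.
 */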
SEC("sk_lookup/select_sock_a_no_reuseport")
int select_sock_a_no_reuseport(struct bpf_sk_lookup *ctx)
{
	struct bpf_sock *sk;
	int err;

	sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A);
	if (!sk)
		return SK_DROP;

	err = bpf_sk_assign(ctx, sk, BPF_SK_LOOKUP_F_NO_REUSEPORT);
	bpf_sk_release(sk);
	return err ? SK_DROP : SK_PASS;
}

SEC("sk_reuseport/select_sock_b")
int select_sock_b(struct sk_reuseport_md *ctx)
{
	__u32 key = KEY_SERVER_B;
	int err;

	err = bpf_sk_select_reuseport(ctx, &redir_map, &key, 0);
	return err ? SK_DROP : SK_PASS;
}

/* Check that bpf_sk_assign() returns -EEXIST if socket already selected. */
SEC("sk_lookup/sk_assign_eexist")
int sk_assign_eexist(struct bpf_sk_lookup *ctx)
{
	struct bpf_sock *sk;
	int err, ret;

	ret = SK_DROP;
	sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_B);
	if (!sk)
		goto out;
	err = bpf_sk_assign(ctx, sk, 0);
	if (err)
		goto out;
	bpf_sk_release(sk);

	sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A);
	if (!sk)
		goto out;
	err = bpf_sk_assign(ctx, sk, 0);
	if (err != -EEXIST) {
		bpf_printk("sk_assign returned %d, expected %d\n",
			   err, -EEXIST);
		goto out;
	}

	ret = SK_PASS; /* Success, redirect to KEY_SERVER_B */
out:
	if (sk)
		bpf_sk_release(sk);
	return ret;
}

/* Check that bpf_sk_assign(BPF_SK_LOOKUP_F_REPLACE) can override selection. */
SEC("sk_lookup/sk_assign_replace_flag")
int sk_assign_replace_flag(struct bpf_sk_lookup *ctx)
{
	struct bpf_sock *sk;
	int err, ret;

	ret = SK_DROP;
	sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A);
	if (!sk)
		goto out;
	err = bpf_sk_assign(ctx, sk, 0);
	if (err)
		goto out;
	bpf_sk_release(sk);

	sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_B);
	if (!sk)
		goto out;
	err = bpf_sk_assign(ctx, sk, BPF_SK_LOOKUP_F_REPLACE);
	if (err) {
		bpf_printk("sk_assign returned %d, expected 0\n", err);
		goto out;
	}

	ret = SK_PASS; /* Success, redirect to KEY_SERVER_B */
out:
	if (sk)
		bpf_sk_release(sk);
	return ret;
}

/* Check that bpf_sk_assign(sk=NULL) is accepted. */
SEC("sk_lookup/sk_assign_null")
int sk_assign_null(struct bpf_sk_lookup *ctx)
{
	struct bpf_sock *sk = NULL;
	int err, ret;

	ret = SK_DROP;

	err = bpf_sk_assign(ctx, NULL, 0);
	if (err) {
		bpf_printk("sk_assign returned %d, expected 0\n", err);
		goto out;
	}

	sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_B);
	if (!sk)
		goto out;
	err = bpf_sk_assign(ctx, sk, BPF_SK_LOOKUP_F_REPLACE);
	if (err) {
		bpf_printk("sk_assign returned %d, expected 0\n", err);
		goto out;
	}

	if (ctx->sk != sk)
		goto out;
	err = bpf_sk_assign(ctx, NULL, 0);
	if (err != -EEXIST)
		goto out;
	err = bpf_sk_assign(ctx, NULL, BPF_SK_LOOKUP_F_REPLACE);
	if (err)
		goto out;
	err = bpf_sk_assign(ctx, sk, BPF_SK_LOOKUP_F_REPLACE);
	if (err)
		goto out;

	ret = SK_PASS; /* Success, redirect to KEY_SERVER_B */
out:
	if (sk)
		bpf_sk_release(sk);
	return ret;
}

/* Check that selected sk is accessible through context. */
SEC("sk_lookup/access_ctx_sk")
int access_ctx_sk(struct bpf_sk_lookup *ctx)
{
	struct bpf_sock *sk1 = NULL, *sk2 = NULL;
	int err, ret;

	ret = SK_DROP;

	/* Try accessing unassigned (NULL) ctx->sk field */
	if (ctx->sk && ctx->sk->family != AF_INET)
		goto out;

	/* Assign a value to ctx->sk */
	sk1 = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A);
	if (!sk1)
		goto out;
	err = bpf_sk_assign(ctx, sk1, 0);
	if (err)
		goto out;
	if (ctx->sk != sk1)
		goto out;

	/* Access ctx->sk fields */
	if (ctx->sk->family != AF_INET ||
	    ctx->sk->type != SOCK_STREAM ||
	    ctx->sk->state != BPF_TCP_LISTEN)
		goto out;

	/* Reset selection */
	err = bpf_sk_assign(ctx, NULL, BPF_SK_LOOKUP_F_REPLACE);
	if (err)
		goto out;
	if (ctx->sk)
		goto out;

	/* Assign another socket */
	sk2 = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_B);
	if (!sk2)
		goto out;
	err = bpf_sk_assign(ctx, sk2, BPF_SK_LOOKUP_F_REPLACE);
	if (err)
		goto out;
	if (ctx->sk != sk2)
		goto out;

	/* Access reassigned ctx->sk fields */
	if (ctx->sk->family != AF_INET ||
	    ctx->sk->type != SOCK_STREAM ||
	    ctx->sk->state != BPF_TCP_LISTEN)
		goto out;

	ret = SK_PASS; /* Success, redirect to KEY_SERVER_B */
out:
	if (sk1)
		bpf_sk_release(sk1);
	if (sk2)
		bpf_sk_release(sk2);
	return ret;
}

/* Check narrow loads from ctx fields that support them.
 *
 * Narrow loads of size >= target field size from a non-zero offset
 * are not covered because they give bogus results; that is, the
 * verifier ignores the offset.
 */
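/* Note: the byte- and half-word comparisons below check the low-order part
 * of each field first, which assumes a little-endian host.
 */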
SEC("sk_lookup/ctx_narrow_access")
int ctx_narrow_access(struct bpf_sk_lookup *ctx)
{
	struct bpf_sock *sk;
	int err, family;
	__u16 *half;
	__u8 *byte;
	bool v4;

	v4 = (ctx->family == AF_INET);

	/* Narrow loads from family field */
	byte = (__u8 *)&ctx->family;
	half = (__u16 *)&ctx->family;
	if (byte[0] != (v4 ? AF_INET : AF_INET6) ||
	    byte[1] != 0 || byte[2] != 0 || byte[3] != 0)
		return SK_DROP;
	if (half[0] != (v4 ? AF_INET : AF_INET6))
		return SK_DROP;

	byte = (__u8 *)&ctx->protocol;
	if (byte[0] != IPPROTO_TCP ||
	    byte[1] != 0 || byte[2] != 0 || byte[3] != 0)
		return SK_DROP;
	half = (__u16 *)&ctx->protocol;
	if (half[0] != IPPROTO_TCP)
		return SK_DROP;

	/* Narrow loads from remote_port field. Expect non-0 value. */
	byte = (__u8 *)&ctx->remote_port;
	if (byte[0] == 0 && byte[1] == 0 && byte[2] == 0 && byte[3] == 0)
		return SK_DROP;
	half = (__u16 *)&ctx->remote_port;
	if (half[0] == 0)
		return SK_DROP;

	/* Narrow loads from local_port field. Expect DST_PORT. */
	byte = (__u8 *)&ctx->local_port;
	if (byte[0] != ((DST_PORT >> 0) & 0xff) ||
	    byte[1] != ((DST_PORT >> 8) & 0xff) ||
	    byte[2] != 0 || byte[3] != 0)
		return SK_DROP;
	half = (__u16 *)&ctx->local_port;
	if (half[0] != DST_PORT)
		return SK_DROP;

	/* Narrow loads from IPv4 fields */
	if (v4) {
		/* Expect non-0.0.0.0 in remote_ip4 */
		byte = (__u8 *)&ctx->remote_ip4;
		if (byte[0] == 0 && byte[1] == 0 &&
		    byte[2] == 0 && byte[3] == 0)
			return SK_DROP;
		half = (__u16 *)&ctx->remote_ip4;
		if (half[0] == 0 && half[1] == 0)
			return SK_DROP;

		/* Expect DST_IP4 in local_ip4 */
		byte = (__u8 *)&ctx->local_ip4;
		if (byte[0] != ((DST_IP4 >>  0) & 0xff) ||
		    byte[1] != ((DST_IP4 >>  8) & 0xff) ||
		    byte[2] != ((DST_IP4 >> 16) & 0xff) ||
		    byte[3] != ((DST_IP4 >> 24) & 0xff))
			return SK_DROP;
		half = (__u16 *)&ctx->local_ip4;
		if (half[0] != ((DST_IP4 >>  0) & 0xffff) ||
		    half[1] != ((DST_IP4 >> 16) & 0xffff))
			return SK_DROP;
	} else {
		/* Expect 0.0.0.0 IPs when family != AF_INET */
		byte = (__u8 *)&ctx->remote_ip4;
		if (byte[0] != 0 || byte[1] != 0 ||
		    byte[2] != 0 || byte[3] != 0)
			return SK_DROP;
		half = (__u16 *)&ctx->remote_ip4;
		if (half[0] != 0 || half[1] != 0)
			return SK_DROP;

		byte = (__u8 *)&ctx->local_ip4;
		if (byte[0] != 0 || byte[1] != 0 ||
		    byte[2] != 0 || byte[3] != 0)
			return SK_DROP;
		half = (__u16 *)&ctx->local_ip4;
		if (half[0] != 0 || half[1] != 0)
			return SK_DROP;
	}

	/* Narrow loads from IPv6 fields */
	if (!v4) {
		/* Expect non-:: IP in remote_ip6 */
		byte = (__u8 *)&ctx->remote_ip6;
		if (byte[0] == 0 && byte[1] == 0 &&
		    byte[2] == 0 && byte[3] == 0 &&
		    byte[4] == 0 && byte[5] == 0 &&
		    byte[6] == 0 && byte[7] == 0 &&
		    byte[8] == 0 && byte[9] == 0 &&
		    byte[10] == 0 && byte[11] == 0 &&
		    byte[12] == 0 && byte[13] == 0 &&
		    byte[14] == 0 && byte[15] == 0)
			return SK_DROP;
		half = (__u16 *)&ctx->remote_ip6;
		if (half[0] == 0 && half[1] == 0 &&
		    half[2] == 0 && half[3] == 0 &&
		    half[4] == 0 && half[5] == 0 &&
		    half[6] == 0 && half[7] == 0)
			return SK_DROP;

		/* Expect DST_IP6 in local_ip6 */
		byte = (__u8 *)&ctx->local_ip6;
		if (byte[0] != ((DST_IP6[0] >>  0) & 0xff) ||
		    byte[1] != ((DST_IP6[0] >>  8) & 0xff) ||
		    byte[2] != ((DST_IP6[0] >> 16) & 0xff) ||
		    byte[3] != ((DST_IP6[0] >> 24) & 0xff) ||
		    byte[4] != ((DST_IP6[1] >>  0) & 0xff) ||
		    byte[5] != ((DST_IP6[1] >>  8) & 0xff) ||
		    byte[6] != ((DST_IP6[1] >> 16) & 0xff) ||
		    byte[7] != ((DST_IP6[1] >> 24) & 0xff) ||
		    byte[8] != ((DST_IP6[2] >>  0) & 0xff) ||
		    byte[9] != ((DST_IP6[2] >>  8) & 0xff) ||
		    byte[10] != ((DST_IP6[2] >> 16) & 0xff) ||
		    byte[11] != ((DST_IP6[2] >> 24) & 0xff) ||
		    byte[12] != ((DST_IP6[3] >>  0) & 0xff) ||
		    byte[13] != ((DST_IP6[3] >>  8) & 0xff) ||
		    byte[14] != ((DST_IP6[3] >> 16) & 0xff) ||
		    byte[15] != ((DST_IP6[3] >> 24) & 0xff))
			return SK_DROP;
		half = (__u16 *)&ctx->local_ip6;
		if (half[0] != ((DST_IP6[0] >>  0) & 0xffff) ||
		    half[1] != ((DST_IP6[0] >> 16) & 0xffff) ||
		    half[2] != ((DST_IP6[1] >>  0) & 0xffff) ||
		    half[3] != ((DST_IP6[1] >> 16) & 0xffff) ||
		    half[4] != ((DST_IP6[2] >>  0) & 0xffff) ||
		    half[5] != ((DST_IP6[2] >> 16) & 0xffff) ||
		    half[6] != ((DST_IP6[3] >>  0) & 0xffff) ||
		    half[7] != ((DST_IP6[3] >> 16) & 0xffff))
			return SK_DROP;
	} else {
		/* Expect :: IPs when family != AF_INET6 */
		byte = (__u8 *)&ctx->remote_ip6;
		if (byte[0] != 0 || byte[1] != 0 ||
		    byte[2] != 0 || byte[3] != 0 ||
		    byte[4] != 0 || byte[5] != 0 ||
		    byte[6] != 0 || byte[7] != 0 ||
		    byte[8] != 0 || byte[9] != 0 ||
		    byte[10] != 0 || byte[11] != 0 ||
		    byte[12] != 0 || byte[13] != 0 ||
		    byte[14] != 0 || byte[15] != 0)
			return SK_DROP;
		half = (__u16 *)&ctx->remote_ip6;
		if (half[0] != 0 || half[1] != 0 ||
		    half[2] != 0 || half[3] != 0 ||
		    half[4] != 0 || half[5] != 0 ||
		    half[6] != 0 || half[7] != 0)
			return SK_DROP;

		byte = (__u8 *)&ctx->local_ip6;
		if (byte[0] != 0 || byte[1] != 0 ||
		    byte[2] != 0 || byte[3] != 0 ||
		    byte[4] != 0 || byte[5] != 0 ||
		    byte[6] != 0 || byte[7] != 0 ||
		    byte[8] != 0 || byte[9] != 0 ||
		    byte[10] != 0 || byte[11] != 0 ||
		    byte[12] != 0 || byte[13] != 0 ||
		    byte[14] != 0 || byte[15] != 0)
			return SK_DROP;
		half = (__u16 *)&ctx->local_ip6;
		if (half[0] != 0 || half[1] != 0 ||
		    half[2] != 0 || half[3] != 0 ||
		    half[4] != 0 || half[5] != 0 ||
		    half[6] != 0 || half[7] != 0)
			return SK_DROP;
	}

	/* Success, redirect to KEY_SERVER_B */
	sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_B);
	if (sk) {
		bpf_sk_assign(ctx, sk, 0);
		bpf_sk_release(sk);
	}
	return SK_PASS;
}

/* Check that sk_assign rejects SERVER_A socket with -ESOCKTNOSUPPORT */
SEC("sk_lookup/sk_assign_esocknosupport")
int sk_assign_esocknosupport(struct bpf_sk_lookup *ctx)
{
	struct bpf_sock *sk;
	int err, ret;

	ret = SK_DROP;
	sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A);
	if (!sk)
		goto out;

	err = bpf_sk_assign(ctx, sk, 0);
	if (err != -ESOCKTNOSUPPORT) {
		bpf_printk("sk_assign returned %d, expected %d\n",
			   err, -ESOCKTNOSUPPORT);
		goto out;
	}

	ret = SK_PASS; /* Success, pass to regular lookup */
out:
	if (sk)
		bpf_sk_release(sk);
	return ret;
}

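/* The multi_prog_* programs below only record that they ran (in run_map)
 * and then pass, drop, or redirect; the user-space test presumably attaches
 * several of them in a chain and checks both the flags and the verdict.
 */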
SEC("sk_lookup/multi_prog_pass1")
int multi_prog_pass1(struct bpf_sk_lookup *ctx)
{
	bpf_map_update_elem(&run_map, &KEY_PROG1, &PROG_DONE, BPF_ANY);
	return SK_PASS;
}

SEC("sk_lookup/multi_prog_pass2")
int multi_prog_pass2(struct bpf_sk_lookup *ctx)
{
	bpf_map_update_elem(&run_map, &KEY_PROG2, &PROG_DONE, BPF_ANY);
	return SK_PASS;
}

SEC("sk_lookup/multi_prog_drop1")
int multi_prog_drop1(struct bpf_sk_lookup *ctx)
{
	bpf_map_update_elem(&run_map, &KEY_PROG1, &PROG_DONE, BPF_ANY);
	return SK_DROP;
}

SEC("sk_lookup/multi_prog_drop2")
int multi_prog_drop2(struct bpf_sk_lookup *ctx)
{
	bpf_map_update_elem(&run_map, &KEY_PROG2, &PROG_DONE, BPF_ANY);
	return SK_DROP;
}

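/* Shared helper for the multi_prog_redir* programs: select the SERVER_A
 * socket and report whether the assignment succeeded.
 */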
static __always_inline int select_server_a(struct bpf_sk_lookup *ctx)
{
	struct bpf_sock *sk;
	int err;

	sk = bpf_map_lookup_elem(&redir_map, &KEY_SERVER_A);
	if (!sk)
		return SK_DROP;

	err = bpf_sk_assign(ctx, sk, 0);
	bpf_sk_release(sk);
	if (err)
		return SK_DROP;

	return SK_PASS;
}

SEC("sk_lookup/multi_prog_redir1")
int multi_prog_redir1(struct bpf_sk_lookup *ctx)
{
	int ret;

	ret = select_server_a(ctx);
	bpf_map_update_elem(&run_map, &KEY_PROG1, &PROG_DONE, BPF_ANY);
	return SK_PASS;
}

SEC("sk_lookup/multi_prog_redir2")
int multi_prog_redir2(struct bpf_sk_lookup *ctx)
{
	int ret;

	ret = select_server_a(ctx);
	bpf_map_update_elem(&run_map, &KEY_PROG2, &PROG_DONE, BPF_ANY);
	return SK_PASS;
}

char _license[] SEC("license") = "Dual BSD/GPL";
__u32 _version SEC("version") = 1;