// SPDX-License-Identifier: GPL-2.0

#include <test_progs.h>
#include "cgroup_helpers.h"
#include "test_cgroup_link.skel.h"

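/*
 * The BPF side of this test (progs/test_cgroup_link.c, not shown here) is
 * assumed to consist of two cgroup_skb egress programs that only bump the
 * global counters read back below -- a minimal sketch:
 *
 *	int calls = 0;
 *	int alt_calls = 0;
 *
 *	SEC("cgroup_skb/egress")
 *	int egress(struct __sk_buff *skb)
 *	{
 *		__sync_fetch_and_add(&calls, 1);
 *		return 1;
 *	}
 *
 * with an identical egress_alt program incrementing alt_calls instead.
 */
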
static __u32 duration = 0;
#define PING_CMD	"ping -q -c1 -w1 127.0.0.1 > /dev/null"

static struct test_cgroup_link *skel = NULL;

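/*
 * Reset both skeleton counters, run a single ping over loopback, and verify
 * that the egress/egress_alt programs were invoked the expected number of
 * times. Returns 0 on match, -EINVAL otherwise.
 */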
static int ping_and_check(int exp_calls, int exp_alt_calls)
{
	skel->bss->calls = 0;
	skel->bss->alt_calls = 0;
	CHECK_FAIL(system(PING_CMD));
	if (CHECK(skel->bss->calls != exp_calls, "call_cnt",
		  "exp %d, got %d\n", exp_calls, skel->bss->calls))
		return -EINVAL;
	if (CHECK(skel->bss->alt_calls != exp_alt_calls, "alt_call_cnt",
		  "exp %d, got %d\n", exp_alt_calls, skel->bss->alt_calls))
		return -EINVAL;
	return 0;
}

void test_cgroup_link(void)
{
	struct {
		const char *path;
		int fd;
	} cgs[] = {
		{ "/cg1" },
		{ "/cg1/cg2" },
		{ "/cg1/cg2/cg3" },
		{ "/cg1/cg2/cg3/cg4" },
	};
	int last_cg = ARRAY_SIZE(cgs) - 1, cg_nr = ARRAY_SIZE(cgs);
	DECLARE_LIBBPF_OPTS(bpf_link_update_opts, link_upd_opts);
	struct bpf_link *links[ARRAY_SIZE(cgs)] = {}, *tmp_link;
	__u32 prog_ids[ARRAY_SIZE(cgs)], prog_cnt = 0, attach_flags;
	int i = 0, err, prog_fd;
	bool detach_legacy = false;

	skel = test_cgroup_link__open_and_load();
	if (CHECK(!skel, "skel_open_load", "failed to open/load skeleton\n"))
		return;
	prog_fd = bpf_program__fd(skel->progs.egress);

	err = setup_cgroup_environment();
	if (CHECK(err, "cg_init", "failed: %d\n", err))
		goto cleanup;

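	/* create the nested /cg1/cg2/cg3/cg4 hierarchy */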
	for (i = 0; i < cg_nr; i++) {
		cgs[i].fd = create_and_get_cgroup(cgs[i].path);
		if (CHECK(cgs[i].fd < 0, "cg_create", "fail: %d\n", cgs[i].fd))
			goto cleanup;
	}

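	/* join the deepest cgroup, so our traffic traverses all four levels */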
	err = join_cgroup(cgs[last_cg].path);
	if (CHECK(err, "cg_join", "fail: %d\n", err))
		goto cleanup;

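	/* attach the same egress prog via bpf_link at every level; link
	 * attachments behave like BPF_F_ALLOW_MULTI, so they can coexist
	 */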
	for (i = 0; i < cg_nr; i++) {
		links[i] = bpf_program__attach_cgroup(skel->progs.egress,
						      cgs[i].fd);
		if (CHECK(IS_ERR(links[i]), "cg_attach", "i: %d, err: %ld\n",
			  i, PTR_ERR(links[i])))
			goto cleanup;
	}

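	/* one ICMP echo request traverses all cg_nr effective programs once */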
	ping_and_check(cg_nr, 0);

	/* query the number of effective progs and attach flags in root cg */
	err = bpf_prog_query(cgs[0].fd, BPF_CGROUP_INET_EGRESS,
			     BPF_F_QUERY_EFFECTIVE, &attach_flags, NULL,
			     &prog_cnt);
	CHECK_FAIL(err);
	CHECK_FAIL(attach_flags != BPF_F_ALLOW_MULTI);
	if (CHECK(prog_cnt != 1, "effect_cnt", "exp %d, got %d\n", 1, prog_cnt))
		goto cleanup;

	/* query the number of effective progs in last cg */
	err = bpf_prog_query(cgs[last_cg].fd, BPF_CGROUP_INET_EGRESS,
			     BPF_F_QUERY_EFFECTIVE, NULL, NULL,
			     &prog_cnt);
	CHECK_FAIL(err);
	if (CHECK(prog_cnt != cg_nr, "effect_cnt", "exp %d, got %d\n",
		  cg_nr, prog_cnt))
		goto cleanup;

	/* query the effective prog IDs in last cg */
	err = bpf_prog_query(cgs[last_cg].fd, BPF_CGROUP_INET_EGRESS,
			     BPF_F_QUERY_EFFECTIVE, &attach_flags,
			     prog_ids, &prog_cnt);
	CHECK_FAIL(err);
	CHECK_FAIL(attach_flags != BPF_F_ALLOW_MULTI);
	if (CHECK(prog_cnt != cg_nr, "effect_cnt", "exp %d, got %d\n",
		  cg_nr, prog_cnt))
		goto cleanup;
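	/* every effective slot should hold the same egress program */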
	for (i = 1; i < prog_cnt; i++) {
		CHECK(prog_ids[i - 1] != prog_ids[i], "prog_id_check",
		      "idx %d, prev id %d, cur id %d\n",
		      i, prog_ids[i - 1], prog_ids[i]);
	}

	/* detach bottom program and ping again */
	bpf_link__destroy(links[last_cg]);
	links[last_cg] = NULL;

	ping_and_check(cg_nr - 1, 0);

	/* mix in with non link-based multi-attachments */
	err = bpf_prog_attach(prog_fd, cgs[last_cg].fd,
			      BPF_CGROUP_INET_EGRESS, BPF_F_ALLOW_MULTI);
	if (CHECK(err, "cg_attach_legacy", "errno=%d\n", errno))
		goto cleanup;
	detach_legacy = true;

	links[last_cg] = bpf_program__attach_cgroup(skel->progs.egress,
						    cgs[last_cg].fd);
	if (CHECK(IS_ERR(links[last_cg]), "cg_attach", "err: %ld\n",
		  PTR_ERR(links[last_cg])))
		goto cleanup;

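	/* both the legacy multi-prog and the new link fire at the last level */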
	ping_and_check(cg_nr + 1, 0);

	/* detach link */
	bpf_link__destroy(links[last_cg]);
	links[last_cg] = NULL;

	/* detach legacy */
	err = bpf_prog_detach2(prog_fd, cgs[last_cg].fd, BPF_CGROUP_INET_EGRESS);
	if (CHECK(err, "cg_detach_legacy", "errno=%d\n", errno))
		goto cleanup;
	detach_legacy = false;

	/* attach the prog the legacy way, in exclusive (non-multi) mode */
	err = bpf_prog_attach(prog_fd, cgs[last_cg].fd,
			      BPF_CGROUP_INET_EGRESS, 0);
	if (CHECK(err, "cg_attach_exclusive", "errno=%d\n", errno))
		goto cleanup;
	detach_legacy = true;

	/* attempt to mix in with multi-attach bpf_link */
	tmp_link = bpf_program__attach_cgroup(skel->progs.egress,
					      cgs[last_cg].fd);
	if (CHECK(!IS_ERR(tmp_link), "cg_attach_fail", "unexpected success!\n")) {
		bpf_link__destroy(tmp_link);
		goto cleanup;
	}

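	/* cg_nr - 1 links plus the exclusive legacy prog at the last level */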
	ping_and_check(cg_nr, 0);

	/* detach */
	err = bpf_prog_detach2(prog_fd, cgs[last_cg].fd, BPF_CGROUP_INET_EGRESS);
	if (CHECK(err, "cg_detach_legacy", "errno=%d\n", errno))
		goto cleanup;
	detach_legacy = false;

	ping_and_check(cg_nr - 1, 0);

	/* attach back link-based one */
	links[last_cg] = bpf_program__attach_cgroup(skel->progs.egress,
						    cgs[last_cg].fd);
	if (CHECK(IS_ERR(links[last_cg]), "cg_attach", "err: %ld\n",
		  PTR_ERR(links[last_cg])))
		goto cleanup;

	ping_and_check(cg_nr, 0);

	/* check legacy exclusive prog can't be attached */
	err = bpf_prog_attach(prog_fd, cgs[last_cg].fd,
			      BPF_CGROUP_INET_EGRESS, 0);
	if (CHECK(!err, "cg_attach_exclusive", "unexpected success\n")) {
		bpf_prog_detach2(prog_fd, cgs[last_cg].fd, BPF_CGROUP_INET_EGRESS);
		goto cleanup;
	}

	/* replace BPF programs inside their links for all but first link */
	for (i = 1; i < cg_nr; i++) {
		err = bpf_link__update_program(links[i], skel->progs.egress_alt);
		if (CHECK(err, "prog_upd", "link #%d\n", i))
			goto cleanup;
	}

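	/* root link still runs egress; the updated links run egress_alt */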
	ping_and_check(1, cg_nr - 1);

	/* Attempt program update with wrong expected BPF program */
	link_upd_opts.old_prog_fd = bpf_program__fd(skel->progs.egress_alt);
	link_upd_opts.flags = BPF_F_REPLACE;
	err = bpf_link_update(bpf_link__fd(links[0]),
			      bpf_program__fd(skel->progs.egress_alt),
			      &link_upd_opts);
	if (CHECK(err == 0 || errno != EPERM, "prog_cmpxchg1",
		  "unexpectedly succeeded or wrong errno, err %d, errno %d\n",
		  err, errno))
		goto cleanup;

	/* Compare-exchange single link program from egress to egress_alt */
	link_upd_opts.old_prog_fd = bpf_program__fd(skel->progs.egress);
	link_upd_opts.flags = BPF_F_REPLACE;
	err = bpf_link_update(bpf_link__fd(links[0]),
			      bpf_program__fd(skel->progs.egress_alt),
			      &link_upd_opts);
	if (CHECK(err, "prog_cmpxchg2", "errno %d\n", errno))
		goto cleanup;

	/* now all links run egress_alt, so only alt_calls should increment */
	ping_and_check(0, cg_nr);

	/* close cgroup FDs before detaching links */
	for (i = 0; i < cg_nr; i++) {
		if (cgs[i].fd > 0) {
			close(cgs[i].fd);
			cgs[i].fd = -1;
		}
	}

	/* BPF programs should still get called */
	ping_and_check(0, cg_nr);

	/* leave the cgroups and remove them; don't detach programs */
	cleanup_cgroup_environment();

	/* BPF programs should have been auto-detached */
	ping_and_check(0, 0);

cleanup:
	if (detach_legacy)
		bpf_prog_detach2(prog_fd, cgs[last_cg].fd,
				 BPF_CGROUP_INET_EGRESS);

	for (i = 0; i < cg_nr; i++) {
		if (!IS_ERR(links[i]))
			bpf_link__destroy(links[i]);
	}
	test_cgroup_link__destroy(skel);

	for (i = 0; i < cg_nr; i++) {
		if (cgs[i].fd > 0)
			close(cgs[i].fd);
	}
	cleanup_cgroup_environment();
}