1 // SPDX-License-Identifier: GPL-2.0
2 
3 #include <test_progs.h>
4 #include "cgroup_helpers.h"
5 #include "testing_helpers.h"
6 #include "test_cgroup_link.skel.h"
7 
/* Required by the CHECK()/CHECK_FAIL() macros from test_progs.h. */
static __u32 duration = 0;
/* One quiet ICMP echo over loopback; triggers the egress BPF programs. */
#define PING_CMD	"ping -q -c1 -w1 127.0.0.1 > /dev/null"

/* Loaded skeleton; shared with ping_and_check() below. */
static struct test_cgroup_link *skel = NULL;
12 
13 int ping_and_check(int exp_calls, int exp_alt_calls)
14 {
15 	skel->bss->calls = 0;
16 	skel->bss->alt_calls = 0;
17 	CHECK_FAIL(system(PING_CMD));
18 	if (CHECK(skel->bss->calls != exp_calls, "call_cnt",
19 		  "exp %d, got %d\n", exp_calls, skel->bss->calls))
20 		return -EINVAL;
21 	if (CHECK(skel->bss->alt_calls != exp_alt_calls, "alt_call_cnt",
22 		  "exp %d, got %d\n", exp_alt_calls, skel->bss->alt_calls))
23 		return -EINVAL;
24 	return 0;
25 }
26 
/* End-to-end test of bpf_link-based cgroup attachment for
 * BPF_CGROUP_INET_EGRESS. Exercises, in order:
 *   - link attachment of the same program at every level of a 4-deep
 *     cgroup hierarchy;
 *   - effective-program queries (count, attach flags, prog IDs);
 *   - mixing links with legacy bpf_prog_attach() multi-attachments;
 *   - rejection of a new link while a legacy exclusive prog is attached,
 *     and rejection of a legacy exclusive attach while a link exists;
 *   - in-place program replacement via bpf_link__update_program() and
 *     compare-and-swap semantics of bpf_link_update(BPF_F_REPLACE);
 *   - links keeping programs attached after cgroup FDs are closed;
 *   - explicit bpf_link__detach() and auto-detach on cgroup removal.
 * Each phase is validated by counting program invocations for one ping.
 */
void test_cgroup_link(void)
{
	struct {
		const char *path;
		int fd;
	} cgs[] = {
		{ "/cg1" },
		{ "/cg1/cg2" },
		{ "/cg1/cg2/cg3" },
		{ "/cg1/cg2/cg3/cg4" },
	};
	int last_cg = ARRAY_SIZE(cgs) - 1, cg_nr = ARRAY_SIZE(cgs);
	DECLARE_LIBBPF_OPTS(bpf_link_update_opts, link_upd_opts);
	struct bpf_link *links[ARRAY_SIZE(cgs)] = {}, *tmp_link;
	__u32 prog_ids[ARRAY_SIZE(cgs)], prog_cnt = 0, attach_flags, prog_id;
	struct bpf_link_info info;
	int i = 0, err, prog_fd;
	bool detach_legacy = false;

	skel = test_cgroup_link__open_and_load();
	if (CHECK(!skel, "skel_open_load", "failed to open/load skeleton\n"))
		return;
	prog_fd = bpf_program__fd(skel->progs.egress);

	err = setup_cgroup_environment();
	if (CHECK(err, "cg_init", "failed: %d\n", err))
		goto cleanup;

	/* create nested hierarchy /cg1/cg2/cg3/cg4 */
	for (i = 0; i < cg_nr; i++) {
		cgs[i].fd = create_and_get_cgroup(cgs[i].path);
		if (CHECK(cgs[i].fd < 0, "cg_create", "fail: %d\n", cgs[i].fd))
			goto cleanup;
	}

	/* join the deepest cgroup so traffic traverses all four levels */
	err = join_cgroup(cgs[last_cg].path);
	if (CHECK(err, "cg_join", "fail: %d\n", err))
		goto cleanup;

	/* attach the same egress prog via a bpf_link at every level */
	for (i = 0; i < cg_nr; i++) {
		links[i] = bpf_program__attach_cgroup(skel->progs.egress,
						      cgs[i].fd);
		if (CHECK(IS_ERR(links[i]), "cg_attach", "i: %d, err: %ld\n",
				 i, PTR_ERR(links[i])))
			goto cleanup;
	}

	ping_and_check(cg_nr, 0);

	/* query the number of effective progs and attach flags in root cg */
	err = bpf_prog_query(cgs[0].fd, BPF_CGROUP_INET_EGRESS,
			     BPF_F_QUERY_EFFECTIVE, &attach_flags, NULL,
			     &prog_cnt);
	CHECK_FAIL(err);
	CHECK_FAIL(attach_flags != BPF_F_ALLOW_MULTI);
	if (CHECK(prog_cnt != 1, "effect_cnt", "exp %d, got %d\n", 1, prog_cnt))
		goto cleanup;

	/* query the number of effective progs in last cg */
	err = bpf_prog_query(cgs[last_cg].fd, BPF_CGROUP_INET_EGRESS,
			     BPF_F_QUERY_EFFECTIVE, NULL, NULL,
			     &prog_cnt);
	CHECK_FAIL(err);
	/* NOTE(review): attach_flags was not written by this query (NULL was
	 * passed above), so this re-checks the value left over from the
	 * root-cgroup query — presumably intentional, but worth confirming.
	 */
	CHECK_FAIL(attach_flags != BPF_F_ALLOW_MULTI);
	if (CHECK(prog_cnt != cg_nr, "effect_cnt", "exp %d, got %d\n",
		  cg_nr, prog_cnt))
		goto cleanup;

	/* query the effective prog IDs in last cg */
	err = bpf_prog_query(cgs[last_cg].fd, BPF_CGROUP_INET_EGRESS,
			     BPF_F_QUERY_EFFECTIVE, &attach_flags,
			     prog_ids, &prog_cnt);
	CHECK_FAIL(err);
	CHECK_FAIL(attach_flags != BPF_F_ALLOW_MULTI);
	if (CHECK(prog_cnt != cg_nr, "effect_cnt", "exp %d, got %d\n",
		  cg_nr, prog_cnt))
		goto cleanup;
	/* all four effective progs are the same program, so all IDs match */
	for (i = 1; i < prog_cnt; i++) {
		CHECK(prog_ids[i - 1] != prog_ids[i], "prog_id_check",
		      "idx %d, prev id %d, cur id %d\n",
		      i, prog_ids[i - 1], prog_ids[i]);
	}

	/* detach bottom program and ping again */
	bpf_link__destroy(links[last_cg]);
	links[last_cg] = NULL;

	ping_and_check(cg_nr - 1, 0);

	/* mix in with non link-based multi-attachments */
	err = bpf_prog_attach(prog_fd, cgs[last_cg].fd,
			      BPF_CGROUP_INET_EGRESS, BPF_F_ALLOW_MULTI);
	if (CHECK(err, "cg_attach_legacy", "errno=%d\n", errno))
		goto cleanup;
	detach_legacy = true;

	links[last_cg] = bpf_program__attach_cgroup(skel->progs.egress,
						    cgs[last_cg].fd);
	if (CHECK(IS_ERR(links[last_cg]), "cg_attach", "err: %ld\n",
		  PTR_ERR(links[last_cg])))
		goto cleanup;

	/* legacy + link on last cg coexist: cg_nr link calls + 1 legacy */
	ping_and_check(cg_nr + 1, 0);

	/* detach link */
	bpf_link__destroy(links[last_cg]);
	links[last_cg] = NULL;

	/* detach legacy */
	err = bpf_prog_detach2(prog_fd, cgs[last_cg].fd, BPF_CGROUP_INET_EGRESS);
	if (CHECK(err, "cg_detach_legacy", "errno=%d\n", errno))
		goto cleanup;
	detach_legacy = false;

	/* attach legacy exclusive prog attachment */
	err = bpf_prog_attach(prog_fd, cgs[last_cg].fd,
			      BPF_CGROUP_INET_EGRESS, 0);
	if (CHECK(err, "cg_attach_exclusive", "errno=%d\n", errno))
		goto cleanup;
	detach_legacy = true;

	/* attempt to mix in with multi-attach bpf_link: must be rejected,
	 * since the cgroup already has an exclusive (non-multi) attachment
	 */
	tmp_link = bpf_program__attach_cgroup(skel->progs.egress,
					      cgs[last_cg].fd);
	if (CHECK(!IS_ERR(tmp_link), "cg_attach_fail", "unexpected success!\n")) {
		bpf_link__destroy(tmp_link);
		goto cleanup;
	}

	ping_and_check(cg_nr, 0);

	/* detach */
	err = bpf_prog_detach2(prog_fd, cgs[last_cg].fd, BPF_CGROUP_INET_EGRESS);
	if (CHECK(err, "cg_detach_legacy", "errno=%d\n", errno))
		goto cleanup;
	detach_legacy = false;

	ping_and_check(cg_nr - 1, 0);

	/* attach back link-based one */
	links[last_cg] = bpf_program__attach_cgroup(skel->progs.egress,
						    cgs[last_cg].fd);
	if (CHECK(IS_ERR(links[last_cg]), "cg_attach", "err: %ld\n",
		  PTR_ERR(links[last_cg])))
		goto cleanup;

	ping_and_check(cg_nr, 0);

	/* check legacy exclusive prog can't be attached while a link exists */
	err = bpf_prog_attach(prog_fd, cgs[last_cg].fd,
			      BPF_CGROUP_INET_EGRESS, 0);
	if (CHECK(!err, "cg_attach_exclusive", "unexpected success")) {
		bpf_prog_detach2(prog_fd, cgs[last_cg].fd, BPF_CGROUP_INET_EGRESS);
		goto cleanup;
	}

	/* replace BPF programs inside their links for all but first link */
	for (i = 1; i < cg_nr; i++) {
		err = bpf_link__update_program(links[i], skel->progs.egress_alt);
		if (CHECK(err, "prog_upd", "link #%d\n", i))
			goto cleanup;
	}

	/* root still runs egress once; the other three run egress_alt */
	ping_and_check(1, cg_nr - 1);

	/* Attempt program update with wrong expected BPF program: links[0]
	 * still holds egress, so claiming old prog == egress_alt must fail
	 * with EPERM (compare-and-swap mismatch)
	 */
	link_upd_opts.old_prog_fd = bpf_program__fd(skel->progs.egress_alt);
	link_upd_opts.flags = BPF_F_REPLACE;
	err = bpf_link_update(bpf_link__fd(links[0]),
			      bpf_program__fd(skel->progs.egress_alt),
			      &link_upd_opts);
	if (CHECK(err == 0 || errno != EPERM, "prog_cmpxchg1",
		  "unexpectedly succeeded, err %d, errno %d\n", err, -errno))
		goto cleanup;

	/* Compare-exchange single link program from egress to egress_alt */
	link_upd_opts.old_prog_fd = bpf_program__fd(skel->progs.egress);
	link_upd_opts.flags = BPF_F_REPLACE;
	err = bpf_link_update(bpf_link__fd(links[0]),
			      bpf_program__fd(skel->progs.egress_alt),
			      &link_upd_opts);
	if (CHECK(err, "prog_cmpxchg2", "errno %d\n", -errno))
		goto cleanup;

	/* ping: now every level runs egress_alt */
	ping_and_check(0, cg_nr);

	/* close cgroup FDs before detaching links */
	for (i = 0; i < cg_nr; i++) {
		if (cgs[i].fd > 0) {
			close(cgs[i].fd);
			cgs[i].fd = -1;
		}
	}

	/* BPF programs should still get called: links, not FDs, pin them */
	ping_and_check(0, cg_nr);

	prog_id = link_info_prog_id(links[0], &info);
	CHECK(prog_id == 0, "link_info", "failed\n");
	CHECK(info.cgroup.cgroup_id == 0, "cgroup_id", "unexpected %llu\n", info.cgroup.cgroup_id);

	/* explicit detach defuses the link without destroying it */
	err = bpf_link__detach(links[0]);
	if (CHECK(err, "link_detach", "failed %d\n", err))
		goto cleanup;

	/* cgroup_id should be zero in link_info after detach */
	prog_id = link_info_prog_id(links[0], &info);
	CHECK(prog_id == 0, "link_info", "failed\n");
	CHECK(info.cgroup.cgroup_id != 0, "cgroup_id", "unexpected %llu\n", info.cgroup.cgroup_id);

	/* First BPF program shouldn't be called anymore */
	ping_and_check(0, cg_nr - 1);

	/* leave cgroup and remove them, don't detach programs */
	cleanup_cgroup_environment();

	/* BPF programs should have been auto-detached */
	ping_and_check(0, 0);

cleanup:
	if (detach_legacy)
		bpf_prog_detach2(prog_fd, cgs[last_cg].fd,
				 BPF_CGROUP_INET_EGRESS);

	/* bpf_link__destroy() tolerates NULL/ERR pointers internally, but
	 * only non-error links are passed here anyway
	 */
	for (i = 0; i < cg_nr; i++) {
		if (!IS_ERR(links[i]))
			bpf_link__destroy(links[i]);
	}
	test_cgroup_link__destroy(skel);

	for (i = 0; i < cg_nr; i++) {
		if (cgs[i].fd > 0)
			close(cgs[i].fd);
	}
	cleanup_cgroup_environment();
}
263