// SPDX-License-Identifier: GPL-2.0

#include <test_progs.h>

#include "cgroup_helpers.h"

#define PING_CMD	"ping -q -c1 -w1 127.0.0.1 > /dev/null"

static char bpf_log_buf[BPF_LOG_BUF_SIZE];

static int map_fd = -1;

static int prog_load_cnt(int verdict, int val)
{
	int cgroup_storage_fd, percpu_cgroup_storage_fd;

	if (map_fd < 0)
		map_fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, NULL, 4, 8, 1, NULL);
	if (map_fd < 0) {
		printf("failed to create map '%s'\n", strerror(errno));
		return -1;
	}

	cgroup_storage_fd = bpf_map_create(BPF_MAP_TYPE_CGROUP_STORAGE, NULL,
				sizeof(struct bpf_cgroup_storage_key), 8, 0, NULL);
	if (cgroup_storage_fd < 0) {
		printf("failed to create map '%s'\n", strerror(errno));
		return -1;
	}

	percpu_cgroup_storage_fd = bpf_map_create(
		BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE, NULL,
		sizeof(struct bpf_cgroup_storage_key), 8, 0, NULL);
	if (percpu_cgroup_storage_fd < 0) {
		printf("failed to create map '%s'\n", strerror(errno));
		close(cgroup_storage_fd);
		return -1;
	}

	struct bpf_insn prog[] = {
		BPF_MOV32_IMM(BPF_REG_0, 0),
		BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -4), /* *(u32 *)(fp - 4) = r0 */
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4), /* r2 = fp - 4 */
		BPF_LD_MAP_FD(BPF_REG_1, map_fd),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
		BPF_MOV64_IMM(BPF_REG_1, val), /* r1 = val */
		BPF_ATOMIC_OP(BPF_DW, BPF_ADD, BPF_REG_0, BPF_REG_1, 0),

		BPF_LD_MAP_FD(BPF_REG_1, cgroup_storage_fd),
		BPF_MOV64_IMM(BPF_REG_2, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
		BPF_MOV64_IMM(BPF_REG_1, val),
		BPF_ATOMIC_OP(BPF_W, BPF_ADD, BPF_REG_0, BPF_REG_1, 0),

		BPF_LD_MAP_FD(BPF_REG_1, percpu_cgroup_storage_fd),
		BPF_MOV64_IMM(BPF_REG_2, 0),
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
		BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_0, 0),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 0x1),
		BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),

		BPF_MOV64_IMM(BPF_REG_0, verdict), /* r0 = verdict */
		BPF_EXIT_INSN(),
	};
	size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);
	int ret;

	ret = bpf_test_load_program(BPF_PROG_TYPE_CGROUP_SKB,
				    prog, insns_cnt, "GPL", 0,
				    bpf_log_buf, BPF_LOG_BUF_SIZE);

	close(cgroup_storage_fd);
	close(percpu_cgroup_storage_fd);
	return ret;
}

void serial_test_cgroup_attach_multi(void)
{
	__u32 prog_ids[4], prog_cnt = 0, attach_flags, saved_prog_id;
	int cg1 = 0, cg2 = 0, cg3 = 0, cg4 = 0, cg5 = 0, key = 0;
	DECLARE_LIBBPF_OPTS(bpf_prog_attach_opts, attach_opts);
	int allow_prog[7] = {-1, -1, -1, -1, -1, -1, -1};
	unsigned long long value;
	__u32 duration = 0;
	int i = 0;

	for (i = 0; i < ARRAY_SIZE(allow_prog); i++) {
		allow_prog[i] = prog_load_cnt(1, 1 << i);
		if (CHECK(allow_prog[i] < 0, "prog_load",
			  "verifier output:\n%s\n-------\n", bpf_log_buf))
			goto err;
	}

	if (CHECK_FAIL(setup_cgroup_environment()))
		goto err;

	cg1 = create_and_get_cgroup("/cg1");
	if (CHECK_FAIL(cg1 < 0))
		goto err;
	cg2 = create_and_get_cgroup("/cg1/cg2");
	if (CHECK_FAIL(cg2 < 0))
		goto err;
	cg3 = create_and_get_cgroup("/cg1/cg2/cg3");
	if (CHECK_FAIL(cg3 < 0))
		goto err;
	cg4 = create_and_get_cgroup("/cg1/cg2/cg3/cg4");
	if (CHECK_FAIL(cg4 < 0))
		goto err;
	cg5 = create_and_get_cgroup("/cg1/cg2/cg3/cg4/cg5");
	if (CHECK_FAIL(cg5 < 0))
		goto err;

	if (CHECK_FAIL(join_cgroup("/cg1/cg2/cg3/cg4/cg5")))
		goto err;

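	/*
	 * Attach the counting programs across the hierarchy with a mix of
	 * attach modes; prog i adds (1 << i) to the shared packet counter:
	 *
	 *   cg1: prog0 + prog1 (BPF_F_ALLOW_MULTI)
	 *   cg2: prog2         (BPF_F_ALLOW_OVERRIDE)
	 *   cg3: prog3         (BPF_F_ALLOW_MULTI)
	 *   cg4: prog4         (BPF_F_ALLOW_OVERRIDE)
	 *   cg5: prog5         (no flags)
	 */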
	if (CHECK(bpf_prog_attach(allow_prog[0], cg1, BPF_CGROUP_INET_EGRESS,
				  BPF_F_ALLOW_MULTI),
		  "prog0_attach_to_cg1_multi", "errno=%d\n", errno))
		goto err;

	if (CHECK(!bpf_prog_attach(allow_prog[0], cg1, BPF_CGROUP_INET_EGRESS,
				   BPF_F_ALLOW_MULTI),
		  "fail_same_prog_attach_to_cg1", "unexpected success\n"))
		goto err;

	if (CHECK(bpf_prog_attach(allow_prog[1], cg1, BPF_CGROUP_INET_EGRESS,
				  BPF_F_ALLOW_MULTI),
		  "prog1_attach_to_cg1_multi", "errno=%d\n", errno))
		goto err;

	if (CHECK(bpf_prog_attach(allow_prog[2], cg2, BPF_CGROUP_INET_EGRESS,
				  BPF_F_ALLOW_OVERRIDE),
		  "prog2_attach_to_cg2_override", "errno=%d\n", errno))
		goto err;

	if (CHECK(bpf_prog_attach(allow_prog[3], cg3, BPF_CGROUP_INET_EGRESS,
				  BPF_F_ALLOW_MULTI),
		  "prog3_attach_to_cg3_multi", "errno=%d\n", errno))
		goto err;

	if (CHECK(bpf_prog_attach(allow_prog[4], cg4, BPF_CGROUP_INET_EGRESS,
				  BPF_F_ALLOW_OVERRIDE),
		  "prog4_attach_to_cg4_override", "errno=%d\n", errno))
		goto err;

	if (CHECK(bpf_prog_attach(allow_prog[5], cg5, BPF_CGROUP_INET_EGRESS, 0),
		  "prog5_attach_to_cg5_none", "errno=%d\n", errno))
		goto err;

	CHECK_FAIL(system(PING_CMD));
	CHECK_FAIL(bpf_map_lookup_elem(map_fd, &key, &value));
	CHECK_FAIL(value != 1 + 2 + 8 + 32);

	/* query the number of effective progs in cg5 */
	CHECK_FAIL(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS,
				  BPF_F_QUERY_EFFECTIVE, NULL, NULL, &prog_cnt));
	CHECK_FAIL(prog_cnt != 4);
	/* retrieve prog_ids of effective progs in cg5 */
	CHECK_FAIL(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS,
				  BPF_F_QUERY_EFFECTIVE, &attach_flags,
				  prog_ids, &prog_cnt));
	CHECK_FAIL(prog_cnt != 4);
	CHECK_FAIL(attach_flags != 0);
	saved_prog_id = prog_ids[0];
	/* check enospc handling */
	prog_ids[0] = 0;
	prog_cnt = 2;
	CHECK_FAIL(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS,
				  BPF_F_QUERY_EFFECTIVE, &attach_flags,
				  prog_ids, &prog_cnt) >= 0);
	CHECK_FAIL(errno != ENOSPC);
	CHECK_FAIL(prog_cnt != 4);
	/* check that prog_ids are returned even when buffer is too small */
	CHECK_FAIL(prog_ids[0] != saved_prog_id);
	/* retrieve prog_id of single attached prog in cg5 */
	prog_ids[0] = 0;
	CHECK_FAIL(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, 0, NULL,
				  prog_ids, &prog_cnt));
	CHECK_FAIL(prog_cnt != 1);
	CHECK_FAIL(prog_ids[0] != saved_prog_id);

	/* detach bottom program and ping again */
	if (CHECK(bpf_prog_detach2(-1, cg5, BPF_CGROUP_INET_EGRESS),
		  "prog_detach_from_cg5", "errno=%d\n", errno))
		goto err;

	value = 0;
	CHECK_FAIL(bpf_map_update_elem(map_fd, &key, &value, 0));
	CHECK_FAIL(system(PING_CMD));
	CHECK_FAIL(bpf_map_lookup_elem(map_fd, &key, &value));
	CHECK_FAIL(value != 1 + 2 + 8 + 16);

	/* test replace */

	attach_opts.flags = BPF_F_ALLOW_OVERRIDE | BPF_F_REPLACE;
	attach_opts.replace_prog_fd = allow_prog[0];
	if (CHECK(!bpf_prog_attach_xattr(allow_prog[6], cg1,
					 BPF_CGROUP_INET_EGRESS, &attach_opts),
		  "fail_prog_replace_override", "unexpected success\n"))
		goto err;
	CHECK_FAIL(errno != EINVAL);

	attach_opts.flags = BPF_F_REPLACE;
	if (CHECK(!bpf_prog_attach_xattr(allow_prog[6], cg1,
					 BPF_CGROUP_INET_EGRESS, &attach_opts),
		  "fail_prog_replace_no_multi", "unexpected success\n"))
		goto err;
	CHECK_FAIL(errno != EINVAL);

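	/* a replace_prog_fd that is not a valid fd must be rejected with EBADF */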
	attach_opts.flags = BPF_F_ALLOW_MULTI | BPF_F_REPLACE;
	attach_opts.replace_prog_fd = -1;
	if (CHECK(!bpf_prog_attach_xattr(allow_prog[6], cg1,
					 BPF_CGROUP_INET_EGRESS, &attach_opts),
		  "fail_prog_replace_bad_fd", "unexpected success\n"))
		goto err;
	CHECK_FAIL(errno != EBADF);

	/* replacing a program that is not attached to cgroup should fail */
	attach_opts.replace_prog_fd = allow_prog[3];
	if (CHECK(!bpf_prog_attach_xattr(allow_prog[6], cg1,
					 BPF_CGROUP_INET_EGRESS, &attach_opts),
		  "fail_prog_replace_no_ent", "unexpected success\n"))
		goto err;
	CHECK_FAIL(errno != ENOENT);

	/* replace 1st from the top program */
	attach_opts.replace_prog_fd = allow_prog[0];
	if (CHECK(bpf_prog_attach_xattr(allow_prog[6], cg1,
					BPF_CGROUP_INET_EGRESS, &attach_opts),
		  "prog_replace", "errno=%d\n", errno))
		goto err;

	/* replace program with itself */
	attach_opts.replace_prog_fd = allow_prog[6];
	if (CHECK(bpf_prog_attach_xattr(allow_prog[6], cg1,
					BPF_CGROUP_INET_EGRESS, &attach_opts),
		  "prog_replace", "errno=%d\n", errno))
		goto err;

	value = 0;
	CHECK_FAIL(bpf_map_update_elem(map_fd, &key, &value, 0));
	CHECK_FAIL(system(PING_CMD));
	CHECK_FAIL(bpf_map_lookup_elem(map_fd, &key, &value));
	CHECK_FAIL(value != 64 + 2 + 8 + 16);

	/* detach 3rd from bottom program and ping again */
	if (CHECK(!bpf_prog_detach2(0, cg3, BPF_CGROUP_INET_EGRESS),
		  "fail_prog_detach_from_cg3", "unexpected success\n"))
		goto err;

	if (CHECK(bpf_prog_detach2(allow_prog[3], cg3, BPF_CGROUP_INET_EGRESS),
		  "prog3_detach_from_cg3", "errno=%d\n", errno))
		goto err;

	value = 0;
	CHECK_FAIL(bpf_map_update_elem(map_fd, &key, &value, 0));
	CHECK_FAIL(system(PING_CMD));
	CHECK_FAIL(bpf_map_lookup_elem(map_fd, &key, &value));
	CHECK_FAIL(value != 64 + 2 + 16);

	/* detach 2nd from bottom program and ping again */
	if (CHECK(bpf_prog_detach2(-1, cg4, BPF_CGROUP_INET_EGRESS),
		  "prog_detach_from_cg4", "errno=%d\n", errno))
		goto err;

	value = 0;
	CHECK_FAIL(bpf_map_update_elem(map_fd, &key, &value, 0));
	CHECK_FAIL(system(PING_CMD));
	CHECK_FAIL(bpf_map_lookup_elem(map_fd, &key, &value));
	CHECK_FAIL(value != 64 + 2 + 4);

	prog_cnt = 4;
	CHECK_FAIL(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS,
				  BPF_F_QUERY_EFFECTIVE, &attach_flags,
				  prog_ids, &prog_cnt));
	CHECK_FAIL(prog_cnt != 3);
	CHECK_FAIL(attach_flags != 0);
	CHECK_FAIL(bpf_prog_query(cg5, BPF_CGROUP_INET_EGRESS, 0, NULL,
				  prog_ids, &prog_cnt));
	CHECK_FAIL(prog_cnt != 0);

err:
	for (i = 0; i < ARRAY_SIZE(allow_prog); i++)
		if (allow_prog[i] >= 0)
			close(allow_prog[i]);
	close(cg1);
	close(cg2);
	close(cg3);
	close(cg4);
	close(cg5);
	cleanup_cgroup_environment();
}