// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>

/* test_tailcall_1 checks basic functionality by patching multiple locations
 * in a single program for a single tail call slot with nop->jmp, jmp->nop
 * and jmp->jmp rewrites. Also checks for nop->nop.
 */
static void test_tailcall_1(void)
{
	int err, map_fd, prog_fd, main_fd, i, j;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	__u32 retval, duration;
	char prog_name[32];
	char buff[128] = {};

	err = bpf_prog_load("tailcall1.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
			    &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_title(obj, "classifier");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);

		prog = bpf_object__find_program_by_title(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
		err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
					&duration, &retval, NULL);
		CHECK(err || retval != i, "tailcall",
		      "err %d errno %d retval %d\n", err, errno, retval);

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
				&duration, &retval, NULL);
	CHECK(err || retval != 3, "tailcall", "err %d errno %d retval %d\n",
	      err, errno, retval);

	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);

		prog = bpf_object__find_program_by_title(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
				&duration, &retval, NULL);
	CHECK(err || retval != 0, "tailcall", "err %d errno %d retval %d\n",
	      err, errno, retval);

	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
		j = bpf_map__def(prog_array)->max_entries - 1 - i;
		snprintf(prog_name, sizeof(prog_name), "classifier/%i", j);

		prog = bpf_object__find_program_by_title(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
		j = bpf_map__def(prog_array)->max_entries - 1 - i;

		err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
					&duration, &retval, NULL);
		CHECK(err || retval != j, "tailcall",
		      "err %d errno %d retval %d\n", err, errno, retval);

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
				&duration, &retval, NULL);
	CHECK(err || retval != 3, "tailcall", "err %d errno %d retval %d\n",
	      err, errno, retval);

	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err >= 0 || errno != ENOENT))
			goto out;

		err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
					&duration, &retval, NULL);
		CHECK(err || retval != 3, "tailcall",
		      "err %d errno %d retval %d\n", err, errno, retval);
	}

out:
	bpf_object__close(obj);
}
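
/* For reference, a minimal sketch of the BPF side this test assumes to be
 * built into tailcall1.o. The names are assumptions, and the real program
 * repeats each bpf_tail_call() site so the JIT has multiple locations to
 * patch per slot; the entry program falls through to return 3 when no slot
 * is populated, which is the value checked above after deletions:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
 *		__uint(max_entries, 3);
 *		__uint(key_size, sizeof(__u32));
 *		__uint(value_size, sizeof(__u32));
 *	} jmp_table SEC(".maps");
 *
 *	SEC("classifier/0") int cls0(struct __sk_buff *skb) { return 0; }
 *	SEC("classifier/1") int cls1(struct __sk_buff *skb) { return 1; }
 *	SEC("classifier/2") int cls2(struct __sk_buff *skb) { return 2; }
 *
 *	SEC("classifier")
 *	int entry(struct __sk_buff *skb)
 *	{
 *		bpf_tail_call(skb, &jmp_table, 0);
 *		bpf_tail_call(skb, &jmp_table, 1);
 *		bpf_tail_call(skb, &jmp_table, 2);
 *		return 3;
 *	}
 */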

/* test_tailcall_2 checks that patching multiple programs for a single
 * tail call slot works. It also jumps through several programs and tests
 * the tail call limit counter.
 */
static void test_tailcall_2(void)
{
	int err, map_fd, prog_fd, main_fd, i;
	struct bpf_map *prog_array;
	struct bpf_program *prog;
	struct bpf_object *obj;
	__u32 retval, duration;
	char prog_name[32];
	char buff[128] = {};

	err = bpf_prog_load("tailcall2.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
			    &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_title(obj, "classifier");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);

		prog = bpf_object__find_program_by_title(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
				&duration, &retval, NULL);
	CHECK(err || retval != 2, "tailcall", "err %d errno %d retval %d\n",
	      err, errno, retval);

	i = 2;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
				&duration, &retval, NULL);
	CHECK(err || retval != 1, "tailcall", "err %d errno %d retval %d\n",
	      err, errno, retval);

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
				&duration, &retval, NULL);
	CHECK(err || retval != 3, "tailcall", "err %d errno %d retval %d\n",
	      err, errno, retval);
out:
	bpf_object__close(obj);
}
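
/* The expected return values above (2, then 1 after deleting slot 2, then 3
 * after also deleting slot 0) follow from the chain this test assumes in
 * tailcall2.o: the entry program tail-calls slot 0, classifier/0 tail-calls
 * slot 1, classifier/1 tail-calls slot 2, and each program returns its own
 * index if its tail call does not take, with 3 as the entry fall-through.
 * A sketch of one link in that chain, under those assumptions (names made
 * up):
 *
 *	SEC("classifier/1")
 *	int cls1(struct __sk_buff *skb)
 *	{
 *		bpf_tail_call(skb, &jmp_table, 2);
 *		return 1;
 *	}
 */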

/* test_tailcall_3 checks that the count value of the tail call limit
 * enforcement matches expectations.
 */
static void test_tailcall_3(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i, val;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	__u32 retval, duration;
	char buff[128] = {};

	err = bpf_prog_load("tailcall3.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
			    &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_title(obj, "classifier");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	prog = bpf_object__find_program_by_title(obj, "classifier/0");
	if (CHECK_FAIL(!prog))
		goto out;

	prog_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(prog_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
				&duration, &retval, NULL);
	CHECK(err || retval != 1, "tailcall", "err %d errno %d retval %d\n",
	      err, errno, retval);

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	i = 0;
	err = bpf_map_lookup_elem(data_fd, &i, &val);
	CHECK(err || val != 33, "tailcall count", "err %d errno %d count %d\n",
	      err, errno, val);

	i = 0;
	err = bpf_map_delete_elem(map_fd, &i);
	if (CHECK_FAIL(err))
		goto out;

	err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
				&duration, &retval, NULL);
	CHECK(err || retval != 0, "tailcall", "err %d errno %d retval %d\n",
	      err, errno, retval);
out:
	bpf_object__close(obj);
}
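
/* The expected count of 33 assumes tailcall3.o keeps a counter in its .bss
 * section (looked up above as "tailcall.bss") and that classifier/0
 * increments it and then tail-calls itself, so its body runs once per tail
 * call the kernel permits before the limit stops the loop. A sketch under
 * those assumptions (names made up):
 *
 *	int count;
 *
 *	SEC("classifier/0")
 *	int cls0(struct __sk_buff *skb)
 *	{
 *		count++;
 *		bpf_tail_call(skb, &jmp_table, 0);
 *		return 1;
 *	}
 *
 * The test pins the observed behaviour to exactly 33 executions rather than
 * deriving the number from MAX_TAIL_CALL_CNT.
 */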

/* test_tailcall_4 checks that the kernel properly selects an indirect jump
 * for the case where the tail call key is not a known constant. The key is
 * passed in via global data so that different targets can be selected and
 * their return values compared.
 */
static void test_tailcall_4(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i;
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	__u32 retval, duration;
	static const int zero = 0;
	char buff[128] = {};
	char prog_name[32];

	err = bpf_prog_load("tailcall4.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
			    &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_title(obj, "classifier");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);

		prog = bpf_object__find_program_by_title(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
		err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
					&duration, &retval, NULL);
		CHECK(err || retval != i, "tailcall",
		      "err %d errno %d retval %d\n", err, errno, retval);
	}

	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
		err = bpf_map_update_elem(data_fd, &zero, &i, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
					&duration, &retval, NULL);
		CHECK(err || retval != 3, "tailcall",
		      "err %d errno %d retval %d\n", err, errno, retval);
	}
out:
	bpf_object__close(obj);
}
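
/* The indirect jump in tailcall4.o is assumed to come from reading the tail
 * call key out of global data at run time, so it is never a constant the
 * JIT could turn into a direct jump. A sketch under that assumption (the
 * "selector" name is made up; the test writes it through the .bss map and
 * expects retval i while slot i is populated, and the fall-through value 3
 * once the slot is deleted):
 *
 *	int selector;
 *
 *	SEC("classifier")
 *	int entry(struct __sk_buff *skb)
 *	{
 *		bpf_tail_call(skb, &jmp_table, selector);
 *		return 3;
 *	}
 */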

/* test_tailcall_5 probes similarly to test_tailcall_4 that the kernel
 * generates an indirect jump when the keys are constant but differ
 * depending on which branch is taken.
 */
static void test_tailcall_5(void)
{
	int err, map_fd, prog_fd, main_fd, data_fd, i, key[] = { 1111, 1234, 5678 };
	struct bpf_map *prog_array, *data_map;
	struct bpf_program *prog;
	struct bpf_object *obj;
	__u32 retval, duration;
	static const int zero = 0;
	char buff[128] = {};
	char prog_name[32];

	err = bpf_prog_load("tailcall5.o", BPF_PROG_TYPE_SCHED_CLS, &obj,
			    &prog_fd);
	if (CHECK_FAIL(err))
		return;

	prog = bpf_object__find_program_by_title(obj, "classifier");
	if (CHECK_FAIL(!prog))
		goto out;

	main_fd = bpf_program__fd(prog);
	if (CHECK_FAIL(main_fd < 0))
		goto out;

	prog_array = bpf_object__find_map_by_name(obj, "jmp_table");
	if (CHECK_FAIL(!prog_array))
		goto out;

	map_fd = bpf_map__fd(prog_array);
	if (CHECK_FAIL(map_fd < 0))
		goto out;

	data_map = bpf_object__find_map_by_name(obj, "tailcall.bss");
	if (CHECK_FAIL(!data_map || !bpf_map__is_internal(data_map)))
		goto out;

	data_fd = bpf_map__fd(data_map);
	if (CHECK_FAIL(data_fd < 0))
		goto out;

	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
		snprintf(prog_name, sizeof(prog_name), "classifier/%i", i);

		prog = bpf_object__find_program_by_title(obj, prog_name);
		if (CHECK_FAIL(!prog))
			goto out;

		prog_fd = bpf_program__fd(prog);
		if (CHECK_FAIL(prog_fd < 0))
			goto out;

		err = bpf_map_update_elem(map_fd, &i, &prog_fd, BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;
	}

	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
		err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
					&duration, &retval, NULL);
		CHECK(err || retval != i, "tailcall",
		      "err %d errno %d retval %d\n", err, errno, retval);
	}

	for (i = 0; i < bpf_map__def(prog_array)->max_entries; i++) {
		err = bpf_map_update_elem(data_fd, &zero, &key[i], BPF_ANY);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_map_delete_elem(map_fd, &i);
		if (CHECK_FAIL(err))
			goto out;

		err = bpf_prog_test_run(main_fd, 1, buff, sizeof(buff), 0,
					&duration, &retval, NULL);
		CHECK(err || retval != 3, "tailcall",
		      "err %d errno %d retval %d\n", err, errno, retval);
	}
out:
	bpf_object__close(obj);
}

void test_tailcalls(void)
{
	if (test__start_subtest("tailcall_1"))
		test_tailcall_1();
	if (test__start_subtest("tailcall_2"))
		test_tailcall_2();
	if (test__start_subtest("tailcall_3"))
		test_tailcall_3();
	if (test__start_subtest("tailcall_4"))
		test_tailcall_4();
	if (test__start_subtest("tailcall_5"))
		test_tailcall_5();
}