// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2021 Facebook */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <linux/filter.h>
#include <sys/param.h>
#include "btf.h"
#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"
#include "hashmap.h"
#include "bpf_gen_internal.h"
#include "skel_internal.h"
#include <asm/byteorder.h>

#define MAX_USED_MAPS 64
#define MAX_USED_PROGS 32
#define MAX_KFUNC_DESCS 256
#define MAX_FD_ARRAY_SZ (MAX_USED_MAPS + MAX_KFUNC_DESCS)

/* The following structure describes the stack layout of the loader program.
 * In addition R6 contains the pointer to context.
 * R7 contains the result of the last sys_bpf command (typically error or FD).
 * R9 contains the result of the last sys_close command.
 *
 * Naming convention:
 * ctx - bpf program context
 * stack - bpf program stack
 * blob - bpf_attr-s, strings, insns, map data.
 *        All the bytes that loader prog will use for read/write.
 */
struct loader_stack {
	__u32 btf_fd;
	__u32 inner_map_fd;
	__u32 prog_fd[MAX_USED_PROGS];
};

#define stack_off(field) \
	(__s16)(-sizeof(struct loader_stack) + offsetof(struct loader_stack, field))

#define attr_field(attr, field) (attr + offsetof(union bpf_attr, field))

static int blob_fd_array_off(struct bpf_gen *gen, int index)
{
	return gen->fd_array + index * sizeof(int);
}

static int realloc_insn_buf(struct bpf_gen *gen, __u32 size)
{
	size_t off = gen->insn_cur - gen->insn_start;
	void *insn_start;

	if (gen->error)
		return gen->error;
	if (size > INT32_MAX || off + size > INT32_MAX) {
		gen->error = -ERANGE;
		return -ERANGE;
	}
	insn_start = realloc(gen->insn_start, off + size);
	if (!insn_start) {
		gen->error = -ENOMEM;
		free(gen->insn_start);
		gen->insn_start = NULL;
		return -ENOMEM;
	}
	gen->insn_start = insn_start;
	gen->insn_cur = insn_start + off;
	return 0;
}

static int realloc_data_buf(struct bpf_gen *gen, __u32 size)
{
	size_t off = gen->data_cur - gen->data_start;
	void *data_start;

	if (gen->error)
		return gen->error;
	if (size > INT32_MAX || off + size > INT32_MAX) {
		gen->error = -ERANGE;
		return -ERANGE;
	}
	data_start = realloc(gen->data_start, off + size);
	if (!data_start) {
		gen->error = -ENOMEM;
		free(gen->data_start);
		gen->data_start = NULL;
		return -ENOMEM;
	}
	gen->data_start = data_start;
	gen->data_cur = data_start + off;
	return 0;
}

static void emit(struct bpf_gen *gen, struct bpf_insn insn)
{
	if (realloc_insn_buf(gen, sizeof(insn)))
		return;
	memcpy(gen->insn_cur, &insn, sizeof(insn));
	gen->insn_cur += sizeof(insn);
}

static void emit2(struct bpf_gen *gen, struct bpf_insn insn1, struct bpf_insn insn2)
{
	emit(gen, insn1);
	emit(gen, insn2);
}

static int add_data(struct bpf_gen *gen, const void *data, __u32 size);
static void emit_sys_close_blob(struct bpf_gen *gen, int blob_off);

void bpf_gen__init(struct bpf_gen *gen, int log_level, int nr_progs, int nr_maps)
{
	size_t stack_sz = sizeof(struct loader_stack), nr_progs_sz;
	int i;

	gen->fd_array = add_data(gen, NULL, MAX_FD_ARRAY_SZ * sizeof(int));
	gen->log_level = log_level;
	/* save ctx pointer into R6 */
	emit(gen, BPF_MOV64_REG(BPF_REG_6, BPF_REG_1));

	/* bzero stack */
	emit(gen, BPF_MOV64_REG(BPF_REG_1, BPF_REG_10));
	emit(gen, BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -stack_sz));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, stack_sz));
	emit(gen, BPF_MOV64_IMM(BPF_REG_3, 0));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel));

	/* amount of stack actually used, only used to calculate iterations, not stack offset */
	nr_progs_sz = offsetof(struct loader_stack, prog_fd[nr_progs]);
	/* jump over cleanup code */
	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0,
			      /* size of cleanup code below (including map fd cleanup) */
			      (nr_progs_sz / 4) * 3 + 2 +
			      /* 6 insns for emit_sys_close_blob,
			       * 6 insns for debug_regs in emit_sys_close_blob
			       */
			      nr_maps * (6 + (gen->log_level ? 6 : 0))));

	/* remember the label where all error branches will jump to */
	gen->cleanup_label = gen->insn_cur - gen->insn_start;
	/* emit cleanup code: close all temp FDs */
	for (i = 0; i < nr_progs_sz; i += 4) {
		emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, -stack_sz + i));
		emit(gen, BPF_JMP_IMM(BPF_JSLE, BPF_REG_1, 0, 1));
		emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_close));
	}
	for (i = 0; i < nr_maps; i++)
		emit_sys_close_blob(gen, blob_fd_array_off(gen, i));
	/* R7 contains the error code from sys_bpf. Copy it into R0 and exit. */
	emit(gen, BPF_MOV64_REG(BPF_REG_0, BPF_REG_7));
	emit(gen, BPF_EXIT_INSN());
}
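/* For orientation, the prologue and cleanup code emitted above give every
 * generated loader program roughly this shape (illustrative pseudo-asm;
 * exact offsets depend on nr_progs, nr_maps and log_level):
 *
 *	r6 = r1					// ctx in R6
 *	probe_read_kernel(r10 - stack_sz, stack_sz, 0)	// bzero stack
 *	goto body				// jump over cleanup
 * cleanup:					// gen->cleanup_label
 *	sys_close(each prog_fd spilled on the stack)
 *	sys_close(each map fd in the fd_array blob)
 *	r0 = r7					// propagate last sys_bpf result
 *	exit
 * body:
 *	... commands appended by bpf_gen__map_create/prog_load/etc ...
 */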

static int add_data(struct bpf_gen *gen, const void *data, __u32 size)
{
	__u32 size8 = roundup(size, 8);
	__u64 zero = 0;
	void *prev;

	if (realloc_data_buf(gen, size8))
		return 0;
	prev = gen->data_cur;
	if (data) {
		memcpy(gen->data_cur, data, size);
		memcpy(gen->data_cur + size, &zero, size8 - size);
	} else {
		memset(gen->data_cur, 0, size8);
	}
	gen->data_cur += size8;
	return prev - gen->data_start;
}
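/* Offsets returned by add_data() are relative to the start of the data blob
 * and always 8-byte aligned, because each chunk is padded with zeroes up to
 * the next multiple of 8. For example, appending a 5-byte license string
 * consumes 8 bytes of blob space; the three trailing bytes stay zero.
 */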

/* Get index for map_fd/btf_fd slot in reserved fd_array, or in data relative
 * to start of fd_array. Caller can decide if it is usable or not.
 */
static int add_map_fd(struct bpf_gen *gen)
{
	if (gen->nr_maps == MAX_USED_MAPS) {
		pr_warn("Total maps exceeds %d\n", MAX_USED_MAPS);
		gen->error = -E2BIG;
		return 0;
	}
	return gen->nr_maps++;
}

static int add_kfunc_btf_fd(struct bpf_gen *gen)
{
	int cur;

	if (gen->nr_fd_array == MAX_KFUNC_DESCS) {
		cur = add_data(gen, NULL, sizeof(int));
		return (cur - gen->fd_array) / sizeof(int);
	}
	return MAX_USED_MAPS + gen->nr_fd_array++;
}
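/* The reserved fd_array blob is therefore partitioned as:
 *
 *	[0, MAX_USED_MAPS)					map fds
 *	[MAX_USED_MAPS, MAX_USED_MAPS + MAX_KFUNC_DESCS)	kfunc BTF fds
 *
 * Once all MAX_KFUNC_DESCS slots are taken, add_kfunc_btf_fd() falls back to
 * appending a fresh slot elsewhere in the data blob; the returned index is
 * computed so that blob_fd_array_off() still resolves to that slot.
 */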

static int insn_bytes_to_bpf_size(__u32 sz)
{
	switch (sz) {
	case 8: return BPF_DW;
	case 4: return BPF_W;
	case 2: return BPF_H;
	case 1: return BPF_B;
	default: return -1;
	}
}

/* *(u64 *)(blob + off) = (u64)(void *)(blob + data) */
static void emit_rel_store(struct bpf_gen *gen, int off, int data)
{
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, data));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, off));
	emit(gen, BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0));
}

static void move_blob2blob(struct bpf_gen *gen, int off, int size, int blob_off)
{
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_2, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, blob_off));
	emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_2, 0));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, off));
	emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_1, BPF_REG_0, 0));
}

static void move_blob2ctx(struct bpf_gen *gen, int ctx_off, int size, int blob_off)
{
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, blob_off));
	emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_1, 0));
	emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_6, BPF_REG_0, ctx_off));
}

static void move_ctx2blob(struct bpf_gen *gen, int off, int size, int ctx_off,
			  bool check_non_zero)
{
	emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_6, ctx_off));
	if (check_non_zero)
		/* If value in ctx is zero don't update the blob.
		 * For example: when ctx->map.max_entries == 0, keep default max_entries from bpf.c
		 */
		emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, off));
	emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_1, BPF_REG_0, 0));
}

static void move_stack2blob(struct bpf_gen *gen, int off, int size, int stack_off)
{
	emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_10, stack_off));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, off));
	emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_1, BPF_REG_0, 0));
}

static void move_stack2ctx(struct bpf_gen *gen, int ctx_off, int size, int stack_off)
{
	emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_10, stack_off));
	emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_6, BPF_REG_0, ctx_off));
}

static void emit_sys_bpf(struct bpf_gen *gen, int cmd, int attr, int attr_size)
{
	emit(gen, BPF_MOV64_IMM(BPF_REG_1, cmd));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_2, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, attr));
	emit(gen, BPF_MOV64_IMM(BPF_REG_3, attr_size));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_bpf));
	/* remember the result in R7 */
	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
}

static bool is_simm16(__s64 value)
{
	return value == (__s64)(__s16)value;
}

static void emit_check_err(struct bpf_gen *gen)
{
	__s64 off = -(gen->insn_cur - gen->insn_start - gen->cleanup_label) / 8 - 1;

	/* R7 contains result of last sys_bpf command.
	 * if (R7 < 0) goto cleanup;
	 */
	if (is_simm16(off)) {
		emit(gen, BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0, off));
	} else {
		gen->error = -ERANGE;
		emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, -1));
	}
}
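/* Note on the offset math above: insn_cur - insn_start - cleanup_label is
 * the byte distance from the cleanup label back to the jump being emitted,
 * and dividing by 8 converts it to instructions. The extra -1 is needed
 * because BPF jump offsets are relative to the instruction that follows the
 * jump, not to the jump itself.
 */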

/* reg1 and reg2 should not be R1 - R5. They can be R0, R6 - R10 */
static void emit_debug(struct bpf_gen *gen, int reg1, int reg2,
		       const char *fmt, va_list args)
{
	char buf[1024];
	int addr, len, ret;

	if (!gen->log_level)
		return;
	ret = vsnprintf(buf, sizeof(buf), fmt, args);
	if (ret < 1024 - 7 && reg1 >= 0 && reg2 < 0)
		/* The special case to accommodate common debug_ret():
		 * to avoid specifying BPF_REG_7 and adding " r=%%d" to
		 * prints explicitly.
		 */
		strcat(buf, " r=%d");
	len = strlen(buf) + 1;
	addr = add_data(gen, buf, len);

	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, addr));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
	if (reg1 >= 0)
		emit(gen, BPF_MOV64_REG(BPF_REG_3, reg1));
	if (reg2 >= 0)
		emit(gen, BPF_MOV64_REG(BPF_REG_4, reg2));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_trace_printk));
}

static void debug_regs(struct bpf_gen *gen, int reg1, int reg2, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	emit_debug(gen, reg1, reg2, fmt, args);
	va_end(args);
}

static void debug_ret(struct bpf_gen *gen, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	emit_debug(gen, BPF_REG_7, -1, fmt, args);
	va_end(args);
}

static void __emit_sys_close(struct bpf_gen *gen)
{
	emit(gen, BPF_JMP_IMM(BPF_JSLE, BPF_REG_1, 0,
			      /* 2 is the number of the following insns
			       * 6 is additional insns in debug_regs
			       */
			      2 + (gen->log_level ? 6 : 0)));
	emit(gen, BPF_MOV64_REG(BPF_REG_9, BPF_REG_1));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_close));
	debug_regs(gen, BPF_REG_9, BPF_REG_0, "close(%%d) = %%d");
}

static void emit_sys_close_stack(struct bpf_gen *gen, int stack_off)
{
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, stack_off));
	__emit_sys_close(gen);
}

static void emit_sys_close_blob(struct bpf_gen *gen, int blob_off)
{
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, blob_off));
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0));
	__emit_sys_close(gen);
}

int bpf_gen__finish(struct bpf_gen *gen, int nr_progs, int nr_maps)
{
	int i;

	if (nr_progs < gen->nr_progs || nr_maps != gen->nr_maps) {
		pr_warn("nr_progs %d/%d nr_maps %d/%d mismatch\n",
			nr_progs, gen->nr_progs, nr_maps, gen->nr_maps);
		gen->error = -EFAULT;
		return gen->error;
	}
	emit_sys_close_stack(gen, stack_off(btf_fd));
	for (i = 0; i < gen->nr_progs; i++)
		move_stack2ctx(gen,
			       sizeof(struct bpf_loader_ctx) +
			       sizeof(struct bpf_map_desc) * gen->nr_maps +
			       sizeof(struct bpf_prog_desc) * i +
			       offsetof(struct bpf_prog_desc, prog_fd), 4,
			       stack_off(prog_fd[i]));
	for (i = 0; i < gen->nr_maps; i++)
		move_blob2ctx(gen,
			      sizeof(struct bpf_loader_ctx) +
			      sizeof(struct bpf_map_desc) * i +
			      offsetof(struct bpf_map_desc, map_fd), 4,
			      blob_fd_array_off(gen, i));
	emit(gen, BPF_MOV64_IMM(BPF_REG_0, 0));
	emit(gen, BPF_EXIT_INSN());
	pr_debug("gen: finish %d\n", gen->error);
	if (!gen->error) {
		struct gen_loader_opts *opts = gen->opts;

		opts->insns = gen->insn_start;
		opts->insns_sz = gen->insn_cur - gen->insn_start;
		opts->data = gen->data_start;
		opts->data_sz = gen->data_cur - gen->data_start;
	}
	return gen->error;
}
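/* The context blob that bpf_gen__finish() fills in is laid out as
 *
 *	struct bpf_loader_ctx	ctx;
 *	struct bpf_map_desc	maps[nr_maps];
 *	struct bpf_prog_desc	progs[nr_progs];
 *
 * which is why the ctx offsets above are computed as sizeof(ctx), plus
 * sizeof(map_desc) * nr_maps to skip over the map descriptors, plus
 * sizeof(prog_desc) * i to reach the i-th program descriptor.
 */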

void bpf_gen__free(struct bpf_gen *gen)
{
	if (!gen)
		return;
	free(gen->data_start);
	free(gen->insn_start);
	free(gen);
}

void bpf_gen__load_btf(struct bpf_gen *gen, const void *btf_raw_data,
		       __u32 btf_raw_size)
{
	int attr_size = offsetofend(union bpf_attr, btf_log_level);
	int btf_data, btf_load_attr;
	union bpf_attr attr;

	memset(&attr, 0, attr_size);
	pr_debug("gen: load_btf: size %d\n", btf_raw_size);
	btf_data = add_data(gen, btf_raw_data, btf_raw_size);

	attr.btf_size = btf_raw_size;
	btf_load_attr = add_data(gen, &attr, attr_size);

	/* populate union bpf_attr with user provided log details */
	move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_level), 4,
		      offsetof(struct bpf_loader_ctx, log_level), false);
	move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_size), 4,
		      offsetof(struct bpf_loader_ctx, log_size), false);
	move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_buf), 8,
		      offsetof(struct bpf_loader_ctx, log_buf), false);
	/* populate union bpf_attr with a pointer to the BTF data */
	emit_rel_store(gen, attr_field(btf_load_attr, btf), btf_data);
	/* emit BTF_LOAD command */
	emit_sys_bpf(gen, BPF_BTF_LOAD, btf_load_attr, attr_size);
	debug_ret(gen, "btf_load size %d", btf_raw_size);
	emit_check_err(gen);
	/* remember btf_fd in the stack, if successful */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, stack_off(btf_fd)));
}

void bpf_gen__map_create(struct bpf_gen *gen,
			 enum bpf_map_type map_type,
			 const char *map_name,
			 __u32 key_size, __u32 value_size, __u32 max_entries,
			 struct bpf_map_create_opts *map_attr, int map_idx)
{
	int attr_size = offsetofend(union bpf_attr, map_extra);
	bool close_inner_map_fd = false;
	int map_create_attr, idx;
	union bpf_attr attr;

	memset(&attr, 0, attr_size);
	attr.map_type = map_type;
	attr.key_size = key_size;
	attr.value_size = value_size;
	attr.map_flags = map_attr->map_flags;
	attr.map_extra = map_attr->map_extra;
	if (map_name)
		libbpf_strlcpy(attr.map_name, map_name, sizeof(attr.map_name));
	attr.numa_node = map_attr->numa_node;
	attr.map_ifindex = map_attr->map_ifindex;
	attr.max_entries = max_entries;
	attr.btf_key_type_id = map_attr->btf_key_type_id;
	attr.btf_value_type_id = map_attr->btf_value_type_id;

	pr_debug("gen: map_create: %s idx %d type %d value_type_id %d\n",
		 attr.map_name, map_idx, map_type, attr.btf_value_type_id);

	map_create_attr = add_data(gen, &attr, attr_size);
	if (attr.btf_value_type_id)
		/* populate union bpf_attr with btf_fd saved in the stack earlier */
		move_stack2blob(gen, attr_field(map_create_attr, btf_fd), 4,
				stack_off(btf_fd));
	switch (attr.map_type) {
	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
	case BPF_MAP_TYPE_HASH_OF_MAPS:
		move_stack2blob(gen, attr_field(map_create_attr, inner_map_fd), 4,
				stack_off(inner_map_fd));
		close_inner_map_fd = true;
		break;
	default:
		break;
	}
	/* conditionally update max_entries */
	if (map_idx >= 0)
		move_ctx2blob(gen, attr_field(map_create_attr, max_entries), 4,
			      sizeof(struct bpf_loader_ctx) +
			      sizeof(struct bpf_map_desc) * map_idx +
			      offsetof(struct bpf_map_desc, max_entries),
			      true /* check that max_entries != 0 */);
	/* emit MAP_CREATE command */
	emit_sys_bpf(gen, BPF_MAP_CREATE, map_create_attr, attr_size);
	debug_ret(gen, "map_create %s idx %d type %d value_size %d value_btf_id %d",
		  attr.map_name, map_idx, map_type, value_size,
		  attr.btf_value_type_id);
	emit_check_err(gen);
	/* remember map_fd in the stack, if successful */
	if (map_idx < 0) {
		/* This bpf_gen__map_create() function is called with map_idx >= 0
		 * for all maps that libbpf loading logic tracks.
		 * It's called with -1 to create an inner map.
		 */
		emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7,
				      stack_off(inner_map_fd)));
	} else if (map_idx != gen->nr_maps) {
		gen->error = -EDOM; /* internal bug */
		return;
	} else {
		/* add_map_fd does gen->nr_maps++ */
		idx = add_map_fd(gen);
		emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
						 0, 0, 0, blob_fd_array_off(gen, idx)));
		emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_7, 0));
	}
	if (close_inner_map_fd)
		emit_sys_close_stack(gen, stack_off(inner_map_fd));
}
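/* For map-in-map types the expected call sequence is: create the inner map
 * with map_idx == -1, which parks its fd at stack_off(inner_map_fd), then
 * create the outer map, which copies that fd into attr.inner_map_fd and
 * finally closes it via the emit_sys_close_stack() call above.
 */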

void bpf_gen__record_attach_target(struct bpf_gen *gen, const char *attach_name,
				   enum bpf_attach_type type)
{
	const char *prefix;
	int kind, ret;

	btf_get_kernel_prefix_kind(type, &prefix, &kind);
	gen->attach_kind = kind;
	ret = snprintf(gen->attach_target, sizeof(gen->attach_target), "%s%s",
		       prefix, attach_name);
	/* snprintf() returns the would-be length; >= size means truncation */
	if (ret >= sizeof(gen->attach_target))
		gen->error = -ENOSPC;
}

static void emit_find_attach_target(struct bpf_gen *gen)
{
	int name, len = strlen(gen->attach_target) + 1;

	pr_debug("gen: find_attach_tgt %s %d\n", gen->attach_target, gen->attach_kind);
	name = add_data(gen, gen->attach_target, len);

	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, name));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
	emit(gen, BPF_MOV64_IMM(BPF_REG_3, gen->attach_kind));
	emit(gen, BPF_MOV64_IMM(BPF_REG_4, 0));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_btf_find_by_name_kind));
	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
	debug_ret(gen, "find_by_name_kind(%s,%d)",
		  gen->attach_target, gen->attach_kind);
	emit_check_err(gen);
	/* if successful, btf_id is in lower 32-bit of R7 and
	 * btf_obj_fd is in upper 32-bit
	 */
}

void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, bool is_weak,
			    bool is_typeless, int kind, int insn_idx)
{
	struct ksym_relo_desc *relo;

	relo = libbpf_reallocarray(gen->relos, gen->relo_cnt + 1, sizeof(*relo));
	if (!relo) {
		gen->error = -ENOMEM;
		return;
	}
	gen->relos = relo;
	relo += gen->relo_cnt;
	relo->name = name;
	relo->is_weak = is_weak;
	relo->is_typeless = is_typeless;
	relo->kind = kind;
	relo->insn_idx = insn_idx;
	gen->relo_cnt++;
}

/* returns existing ksym_desc with ref incremented, or inserts a new one */
static struct ksym_desc *get_ksym_desc(struct bpf_gen *gen, struct ksym_relo_desc *relo)
{
	struct ksym_desc *kdesc;
	int i;

	for (i = 0; i < gen->nr_ksyms; i++) {
		if (!strcmp(gen->ksyms[i].name, relo->name)) {
			gen->ksyms[i].ref++;
			return &gen->ksyms[i];
		}
	}
	kdesc = libbpf_reallocarray(gen->ksyms, gen->nr_ksyms + 1, sizeof(*kdesc));
	if (!kdesc) {
		gen->error = -ENOMEM;
		return NULL;
	}
	gen->ksyms = kdesc;
	kdesc = &gen->ksyms[gen->nr_ksyms++];
	kdesc->name = relo->name;
	kdesc->kind = relo->kind;
	kdesc->ref = 1;
	kdesc->off = 0;
	kdesc->insn = 0;
	return kdesc;
}
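/* The first relocation of a symbol resolves it and records the blob offset
 * of the patched instruction in kdesc->insn; every later relocation of the
 * same symbol (kdesc->ref > 1) simply copies imm/off from that instruction
 * with move_blob2blob() instead of resolving the symbol again.
 */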

/* Overwrites BPF_REG_{0, 1, 2, 3, 4, 7}
 * Returns result in BPF_REG_7
 */
static void emit_bpf_find_by_name_kind(struct bpf_gen *gen, struct ksym_relo_desc *relo)
{
	int name_off, len = strlen(relo->name) + 1;

	name_off = add_data(gen, relo->name, len);
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, name_off));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
	emit(gen, BPF_MOV64_IMM(BPF_REG_3, relo->kind));
	emit(gen, BPF_MOV64_IMM(BPF_REG_4, 0));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_btf_find_by_name_kind));
	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
	debug_ret(gen, "find_by_name_kind(%s,%d)", relo->name, relo->kind);
}

/* Overwrites BPF_REG_{0, 1, 2, 3, 4, 7}
 * Returns result in BPF_REG_7
 * Returns u64 symbol addr in BPF_REG_9
 */
static void emit_bpf_kallsyms_lookup_name(struct bpf_gen *gen, struct ksym_relo_desc *relo)
{
	int name_off, len = strlen(relo->name) + 1, res_off;

	name_off = add_data(gen, relo->name, len);
	res_off = add_data(gen, NULL, 8); /* res is u64 */
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, name_off));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
	emit(gen, BPF_MOV64_IMM(BPF_REG_3, 0));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_4, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, res_off));
	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_4));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_kallsyms_lookup_name));
	emit(gen, BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0));
	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
	debug_ret(gen, "kallsyms_lookup_name(%s,%d)", relo->name, relo->kind);
}
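/* In emit_bpf_kallsyms_lookup_name() R7 does double duty: it holds the
 * result-buffer pointer across the helper call (R1-R5 are clobbered by
 * calls), is then used to load the u64 address into R9, and only after that
 * receives the helper's return value.
 */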

/* Expects:
 * BPF_REG_8 - pointer to instruction
 *
 * We need to reuse BTF fd for same symbol otherwise each relocation takes a
 * new index, while kernel limits total kfunc BTFs to 256. For duplicate
 * symbols, this would mean a new BTF fd index for each entry. By pairing
 * symbol name with index, we get the insn->imm, insn->off pairing that
 * kernel uses for kfunc_tab, which becomes the effective limit even though
 * all of them may share same index in fd_array (such that kfunc_btf_tab has
 * 1 element).
 */
static void emit_relo_kfunc_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insn)
{
	struct ksym_desc *kdesc;
	int btf_fd_idx;

	kdesc = get_ksym_desc(gen, relo);
	if (!kdesc)
		return;
	/* try to copy from existing bpf_insn */
	if (kdesc->ref > 1) {
		move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
			       kdesc->insn + offsetof(struct bpf_insn, imm));
		move_blob2blob(gen, insn + offsetof(struct bpf_insn, off), 2,
			       kdesc->insn + offsetof(struct bpf_insn, off));
		goto log;
	}
	/* remember insn offset, so we can copy BTF ID and FD later */
	kdesc->insn = insn;
	emit_bpf_find_by_name_kind(gen, relo);
	if (!relo->is_weak)
		emit_check_err(gen);
	/* get index in fd_array to store BTF FD at */
	btf_fd_idx = add_kfunc_btf_fd(gen);
	if (btf_fd_idx > INT16_MAX) {
		pr_warn("BTF fd off %d for kfunc %s exceeds INT16_MAX, cannot process relocation\n",
			btf_fd_idx, relo->name);
		gen->error = -E2BIG;
		return;
	}
	kdesc->off = btf_fd_idx;
	/* jump to success case */
	emit(gen, BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0, 3));
	/* set value for imm, off as 0 */
	emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, offsetof(struct bpf_insn, imm), 0));
	emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), 0));
	/* skip success case for ret < 0 */
	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 10));
	/* store btf_id into insn[insn_idx].imm */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7, offsetof(struct bpf_insn, imm)));
	/* obtain fd in BPF_REG_9 */
	emit(gen, BPF_MOV64_REG(BPF_REG_9, BPF_REG_7));
	emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_9, 32));
	/* jump to fd_array store if fd denotes module BTF */
	emit(gen, BPF_JMP_IMM(BPF_JNE, BPF_REG_9, 0, 2));
	/* set the default value for off */
	emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), 0));
	/* skip BTF fd store for vmlinux BTF */
	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 4));
	/* load fd_array slot pointer */
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, blob_fd_array_off(gen, btf_fd_idx)));
	/* store BTF fd in slot */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_9, 0));
	/* store index into insn[insn_idx].off */
	emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), btf_fd_idx));
log:
	if (!gen->log_level)
		return;
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_8,
			      offsetof(struct bpf_insn, imm)));
	emit(gen, BPF_LDX_MEM(BPF_H, BPF_REG_9, BPF_REG_8,
			      offsetof(struct bpf_insn, off)));
	debug_regs(gen, BPF_REG_7, BPF_REG_9, " func (%s:count=%d): imm: %%d, off: %%d",
		   relo->name, kdesc->ref);
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, blob_fd_array_off(gen, kdesc->off)));
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_0, 0));
	debug_regs(gen, BPF_REG_9, -1, " func (%s:count=%d): btf_fd",
		   relo->name, kdesc->ref);
}
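/* End state of the patched kfunc call instruction: insn->imm holds the BTF
 * type ID of the kfunc, and insn->off holds the fd_array index of the module
 * BTF fd (or 0 when the kfunc lives in vmlinux BTF). For a weak kfunc that
 * could not be resolved, both fields are left as 0.
 */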

static void emit_ksym_relo_log(struct bpf_gen *gen, struct ksym_relo_desc *relo,
			       int ref)
{
	if (!gen->log_level)
		return;
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_8,
			      offsetof(struct bpf_insn, imm)));
	emit(gen, BPF_LDX_MEM(BPF_H, BPF_REG_9, BPF_REG_8, sizeof(struct bpf_insn) +
			      offsetof(struct bpf_insn, imm)));
	debug_regs(gen, BPF_REG_7, BPF_REG_9, " var t=%d w=%d (%s:count=%d): imm[0]: %%d, imm[1]: %%d",
		   relo->is_typeless, relo->is_weak, relo->name, ref);
	emit(gen, BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_8, offsetofend(struct bpf_insn, code)));
	debug_regs(gen, BPF_REG_9, -1, " var t=%d w=%d (%s:count=%d): insn.reg",
		   relo->is_typeless, relo->is_weak, relo->name, ref);
}

/* Expects:
 * BPF_REG_8 - pointer to instruction
 */
static void emit_relo_ksym_typeless(struct bpf_gen *gen,
				    struct ksym_relo_desc *relo, int insn)
{
	struct ksym_desc *kdesc;

	kdesc = get_ksym_desc(gen, relo);
	if (!kdesc)
		return;
	/* try to copy from existing ldimm64 insn */
	if (kdesc->ref > 1) {
		move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
			       kdesc->insn + offsetof(struct bpf_insn, imm));
		move_blob2blob(gen, insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 4,
			       kdesc->insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm));
		goto log;
	}
	/* remember insn offset, so we can copy ksym addr later */
	kdesc->insn = insn;
	/* skip typeless ksym_desc in fd closing loop in cleanup_relos */
	kdesc->typeless = true;
	emit_bpf_kallsyms_lookup_name(gen, relo);
	emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_7, -ENOENT, 1));
	emit_check_err(gen);
	/* store lower half of addr into insn[insn_idx].imm */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_9, offsetof(struct bpf_insn, imm)));
	/* store upper half of addr into insn[insn_idx + 1].imm */
	emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_9, 32));
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_9,
			      sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm)));
log:
	emit_ksym_relo_log(gen, relo, kdesc->ref);
}
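/* struct bpf_insn packs dst_reg and src_reg into 4-bit fields in the single
 * byte that follows the opcode (offsetofend(struct bpf_insn, code) == 1).
 * Which nibble holds src_reg depends on the target's bitfield layout, hence
 * the endianness-specific masks below that preserve dst_reg while clearing
 * src_reg.
 */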
static __u32 src_reg_mask(void)
{
#if defined(__LITTLE_ENDIAN_BITFIELD)
	return 0x0f; /* src_reg,dst_reg,... */
#elif defined(__BIG_ENDIAN_BITFIELD)
	return 0xf0; /* dst_reg,src_reg,... */
#else
#error "Unsupported bit endianness, cannot proceed"
#endif
}

/* Expects:
 * BPF_REG_8 - pointer to instruction
 */
static void emit_relo_ksym_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insn)
{
	struct ksym_desc *kdesc;
	__u32 reg_mask;

	kdesc = get_ksym_desc(gen, relo);
	if (!kdesc)
		return;
	/* try to copy from existing ldimm64 insn */
	if (kdesc->ref > 1) {
		move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
			       kdesc->insn + offsetof(struct bpf_insn, imm));
		move_blob2blob(gen, insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 4,
			       kdesc->insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm));
		/* jump over src_reg adjustment if imm is not 0, reuse BPF_REG_0 from move_blob2blob */
		emit(gen, BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 3));
		goto clear_src_reg;
	}
	/* remember insn offset, so we can copy BTF ID and FD later */
	kdesc->insn = insn;
	emit_bpf_find_by_name_kind(gen, relo);
	if (!relo->is_weak)
		emit_check_err(gen);
	/* jump to success case */
	emit(gen, BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0, 3));
	/* set values for insn[insn_idx].imm, insn[insn_idx + 1].imm as 0 */
	emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, offsetof(struct bpf_insn, imm), 0));
	emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 0));
	/* skip success case for ret < 0 */
	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 4));
	/* store btf_id into insn[insn_idx].imm */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7, offsetof(struct bpf_insn, imm)));
	/* store btf_obj_fd into insn[insn_idx + 1].imm */
	emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 32));
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7,
			      sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm)));
	/* skip src_reg adjustment */
	emit(gen, BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0, 3));
clear_src_reg:
	/* clear bpf_object__relocate_data's src_reg assignment, otherwise we get a verifier failure */
	reg_mask = src_reg_mask();
	emit(gen, BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_8, offsetofend(struct bpf_insn, code)));
	emit(gen, BPF_ALU32_IMM(BPF_AND, BPF_REG_9, reg_mask));
	emit(gen, BPF_STX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, offsetofend(struct bpf_insn, code)));

	emit_ksym_relo_log(gen, relo, kdesc->ref);
}
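/* When a weak ksym cannot be resolved, both imm halves of the ldimm64 pair
 * are zeroed and src_reg is cleared (dropping the BPF_PSEUDO_BTF_ID marker),
 * so the instruction degrades to loading the plain constant 0 and still
 * passes the verifier.
 */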

void bpf_gen__record_relo_core(struct bpf_gen *gen,
			       const struct bpf_core_relo *core_relo)
{
	struct bpf_core_relo *relos;

	relos = libbpf_reallocarray(gen->core_relos, gen->core_relo_cnt + 1, sizeof(*relos));
	if (!relos) {
		gen->error = -ENOMEM;
		return;
	}
	gen->core_relos = relos;
	relos += gen->core_relo_cnt;
	memcpy(relos, core_relo, sizeof(*relos));
	gen->core_relo_cnt++;
}

static void emit_relo(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insns)
{
	int insn;

	pr_debug("gen: emit_relo (%d): %s at %d\n", relo->kind, relo->name, relo->insn_idx);
	insn = insns + sizeof(struct bpf_insn) * relo->insn_idx;
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_8, BPF_PSEUDO_MAP_IDX_VALUE, 0, 0, 0, insn));
	switch (relo->kind) {
	case BTF_KIND_VAR:
		if (relo->is_typeless)
			emit_relo_ksym_typeless(gen, relo, insn);
		else
			emit_relo_ksym_btf(gen, relo, insn);
		break;
	case BTF_KIND_FUNC:
		emit_relo_kfunc_btf(gen, relo, insn);
		break;
	default:
		pr_warn("Unknown relocation kind '%d'\n", relo->kind);
		gen->error = -EDOM;
		return;
	}
}

static void emit_relos(struct bpf_gen *gen, int insns)
{
	int i;

	for (i = 0; i < gen->relo_cnt; i++)
		emit_relo(gen, gen->relos + i, insns);
}

static void cleanup_core_relo(struct bpf_gen *gen)
{
	if (!gen->core_relo_cnt)
		return;
	free(gen->core_relos);
	gen->core_relo_cnt = 0;
	gen->core_relos = NULL;
}
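/* Note the dual nature of cleanup_relos() below: the emit_sys_close_blob()
 * calls append instructions that run later inside the loader program, while
 * the free()s release gen-time bookkeeping on the host immediately.
 */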
static void cleanup_relos(struct bpf_gen *gen, int insns)
{
	int i, insn;

	for (i = 0; i < gen->nr_ksyms; i++) {
		/* only close fds for typed ksyms and kfuncs */
		if (gen->ksyms[i].kind == BTF_KIND_VAR && !gen->ksyms[i].typeless) {
			/* close fd recorded in insn[insn_idx + 1].imm */
			insn = gen->ksyms[i].insn;
			insn += sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm);
			emit_sys_close_blob(gen, insn);
		} else if (gen->ksyms[i].kind == BTF_KIND_FUNC) {
			emit_sys_close_blob(gen, blob_fd_array_off(gen, gen->ksyms[i].off));
			if (gen->ksyms[i].off < MAX_FD_ARRAY_SZ)
				gen->nr_fd_array--;
		}
	}
	if (gen->nr_ksyms) {
		free(gen->ksyms);
		gen->nr_ksyms = 0;
		gen->ksyms = NULL;
	}
	if (gen->relo_cnt) {
		free(gen->relos);
		gen->relo_cnt = 0;
		gen->relos = NULL;
	}
	cleanup_core_relo(gen);
}

void bpf_gen__prog_load(struct bpf_gen *gen,
			enum bpf_prog_type prog_type, const char *prog_name,
			const char *license, struct bpf_insn *insns, size_t insn_cnt,
			struct bpf_prog_load_opts *load_attr, int prog_idx)
{
	int prog_load_attr, license_off, insns_off, func_info, line_info, core_relos;
	int attr_size = offsetofend(union bpf_attr, core_relo_rec_size);
	union bpf_attr attr;

	memset(&attr, 0, attr_size);
	pr_debug("gen: prog_load: type %d insns_cnt %zd prog_idx %d\n",
		 prog_type, insn_cnt, prog_idx);
	/* add license string to blob of bytes */
	license_off = add_data(gen, license, strlen(license) + 1);
	/* add insns to blob of bytes */
	insns_off = add_data(gen, insns, insn_cnt * sizeof(struct bpf_insn));

	attr.prog_type = prog_type;
	attr.expected_attach_type = load_attr->expected_attach_type;
	attr.attach_btf_id = load_attr->attach_btf_id;
	attr.prog_ifindex = load_attr->prog_ifindex;
	attr.kern_version = 0;
	attr.insn_cnt = (__u32)insn_cnt;
	attr.prog_flags = load_attr->prog_flags;

	attr.func_info_rec_size = load_attr->func_info_rec_size;
	attr.func_info_cnt = load_attr->func_info_cnt;
	func_info = add_data(gen, load_attr->func_info,
			     attr.func_info_cnt * attr.func_info_rec_size);

	attr.line_info_rec_size = load_attr->line_info_rec_size;
	attr.line_info_cnt = load_attr->line_info_cnt;
	line_info = add_data(gen, load_attr->line_info,
			     attr.line_info_cnt * attr.line_info_rec_size);

	attr.core_relo_rec_size = sizeof(struct bpf_core_relo);
	attr.core_relo_cnt = gen->core_relo_cnt;
	core_relos = add_data(gen, gen->core_relos,
			      attr.core_relo_cnt * attr.core_relo_rec_size);

	libbpf_strlcpy(attr.prog_name, prog_name, sizeof(attr.prog_name));
	prog_load_attr = add_data(gen, &attr, attr_size);

	/* populate union bpf_attr with a pointer to license */
	emit_rel_store(gen, attr_field(prog_load_attr, license), license_off);

	/* populate union bpf_attr with a pointer to instructions */
	emit_rel_store(gen, attr_field(prog_load_attr, insns), insns_off);

	/* populate union bpf_attr with a pointer to func_info */
	emit_rel_store(gen, attr_field(prog_load_attr, func_info), func_info);

	/* populate union bpf_attr with a pointer to line_info */
	emit_rel_store(gen, attr_field(prog_load_attr, line_info), line_info);

	/* populate union bpf_attr with a pointer to core_relos */
	emit_rel_store(gen, attr_field(prog_load_attr, core_relos), core_relos);

	/* populate union bpf_attr fd_array with a pointer to data where map_fds are saved */
	emit_rel_store(gen, attr_field(prog_load_attr, fd_array), gen->fd_array);

	/* populate union bpf_attr with user provided log details */
	move_ctx2blob(gen, attr_field(prog_load_attr, log_level), 4,
		      offsetof(struct bpf_loader_ctx, log_level), false);
	move_ctx2blob(gen, attr_field(prog_load_attr, log_size), 4,
		      offsetof(struct bpf_loader_ctx, log_size), false);
	move_ctx2blob(gen, attr_field(prog_load_attr, log_buf), 8,
		      offsetof(struct bpf_loader_ctx, log_buf), false);
	/* populate union bpf_attr with btf_fd saved in the stack earlier */
	move_stack2blob(gen, attr_field(prog_load_attr, prog_btf_fd), 4,
			stack_off(btf_fd));
	if (gen->attach_kind) {
		emit_find_attach_target(gen);
		/* populate union bpf_attr with btf_id and btf_obj_fd found by helper */
		emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
						 0, 0, 0, prog_load_attr));
		emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7,
				      offsetof(union bpf_attr, attach_btf_id)));
		emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 32));
		emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7,
				      offsetof(union bpf_attr, attach_btf_obj_fd)));
	}
	emit_relos(gen, insns_off);
	/* emit PROG_LOAD command */
	emit_sys_bpf(gen, BPF_PROG_LOAD, prog_load_attr, attr_size);
	debug_ret(gen, "prog_load %s insn_cnt %d", attr.prog_name, attr.insn_cnt);
	/* successful or not, close btf module FDs used in extern ksyms and attach_btf_obj_fd */
	cleanup_relos(gen, insns_off);
	if (gen->attach_kind) {
		emit_sys_close_blob(gen,
				    attr_field(prog_load_attr, attach_btf_obj_fd));
		gen->attach_kind = 0;
	}
	emit_check_err(gen);
	/* remember prog_fd in the stack, if successful */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7,
			      stack_off(prog_fd[gen->nr_progs])));
	gen->nr_progs++;
}
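/* bpf_gen__map_update_elem() below pre-populates a map's initial value. The
 * BPF_SKEL_KERNEL flag test decides how the generated code reads
 * ctx->maps[i].initial_value: bpf_probe_read_kernel() when the (light)
 * skeleton runs in kernel context and the pointer is kernel memory,
 * bpf_copy_from_user() otherwise.
 */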
void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *pvalue,
			      __u32 value_size)
{
	int attr_size = offsetofend(union bpf_attr, flags);
	int map_update_attr, value, key;
	union bpf_attr attr;
	int zero = 0;

	memset(&attr, 0, attr_size);
	pr_debug("gen: map_update_elem: idx %d\n", map_idx);

	value = add_data(gen, pvalue, value_size);
	key = add_data(gen, &zero, sizeof(zero));

	/* if (map_desc[map_idx].initial_value) {
	 *	if (ctx->flags & BPF_SKEL_KERNEL)
	 *		bpf_probe_read_kernel(value, value_size, initial_value);
	 *	else
	 *		bpf_copy_from_user(value, value_size, initial_value);
	 * }
	 */
	emit(gen, BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6,
			      sizeof(struct bpf_loader_ctx) +
			      sizeof(struct bpf_map_desc) * map_idx +
			      offsetof(struct bpf_map_desc, initial_value)));
	emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0, 8));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, value));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, value_size));
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_6,
			      offsetof(struct bpf_loader_ctx, flags)));
	emit(gen, BPF_JMP_IMM(BPF_JSET, BPF_REG_0, BPF_SKEL_KERNEL, 2));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_copy_from_user));
	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, 1));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel));

	map_update_attr = add_data(gen, &attr, attr_size);
	move_blob2blob(gen, attr_field(map_update_attr, map_fd), 4,
		       blob_fd_array_off(gen, map_idx));
	emit_rel_store(gen, attr_field(map_update_attr, key), key);
	emit_rel_store(gen, attr_field(map_update_attr, value), value);
	/* emit MAP_UPDATE_ELEM command */
	emit_sys_bpf(gen, BPF_MAP_UPDATE_ELEM, map_update_attr, attr_size);
	debug_ret(gen, "update_elem idx %d value_size %d", map_idx, value_size);
	emit_check_err(gen);
}

void bpf_gen__populate_outer_map(struct bpf_gen *gen, int outer_map_idx, int slot,
				 int inner_map_idx)
{
	int attr_size = offsetofend(union bpf_attr, flags);
	int map_update_attr, key;
	union bpf_attr attr;

	memset(&attr, 0, attr_size);
	pr_debug("gen: populate_outer_map: outer %d key %d inner %d\n",
		 outer_map_idx, slot, inner_map_idx);

	key = add_data(gen, &slot, sizeof(slot));

	map_update_attr = add_data(gen, &attr, attr_size);
	move_blob2blob(gen, attr_field(map_update_attr, map_fd), 4,
		       blob_fd_array_off(gen, outer_map_idx));
	emit_rel_store(gen, attr_field(map_update_attr, key), key);
	emit_rel_store(gen, attr_field(map_update_attr, value),
		       blob_fd_array_off(gen, inner_map_idx));

	/* emit MAP_UPDATE_ELEM command */
	emit_sys_bpf(gen, BPF_MAP_UPDATE_ELEM, map_update_attr, attr_size);
	debug_ret(gen, "populate_outer_map outer %d key %d inner %d",
		  outer_map_idx, slot, inner_map_idx);
	emit_check_err(gen);
}

void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx)
{
	int attr_size = offsetofend(union bpf_attr, map_fd);
	int map_freeze_attr;
	union bpf_attr attr;

	memset(&attr, 0, attr_size);
	pr_debug("gen: map_freeze: idx %d\n", map_idx);
	map_freeze_attr = add_data(gen, &attr, attr_size);
	move_blob2blob(gen, attr_field(map_freeze_attr, map_fd), 4,
		       blob_fd_array_off(gen, map_idx));
	/* emit MAP_FREEZE command */
	emit_sys_bpf(gen, BPF_MAP_FREEZE, map_freeze_attr, attr_size);
	debug_ret(gen, "map_freeze");
	emit_check_err(gen);
}