// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2021 Facebook */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <linux/filter.h>
#include <sys/param.h>
#include "btf.h"
#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"
#include "hashmap.h"
#include "bpf_gen_internal.h"
#include "skel_internal.h"
#include <asm/byteorder.h>

#define MAX_USED_MAPS	64
#define MAX_USED_PROGS	32
#define MAX_KFUNC_DESCS 256
#define MAX_FD_ARRAY_SZ (MAX_USED_MAPS + MAX_KFUNC_DESCS)
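/* Layout of the reserved fd_array region in the blob: slots
 * [0, MAX_USED_MAPS) hold map FDs and slots [MAX_USED_MAPS, MAX_FD_ARRAY_SZ)
 * hold module BTF FDs for kfunc calls; see add_map_fd() and
 * add_kfunc_btf_fd() below.
 */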

/* The following structure describes the stack layout of the loader program.
 * In addition, R6 contains the pointer to the context.
 * R7 contains the result of the last sys_bpf command (typically an error or FD).
 * R9 contains the result of the last sys_close command.
 *
 * Naming convention:
 * ctx - bpf program context
 * stack - bpf program stack
 * blob - bpf_attr-s, strings, insns, map data.
 *        All the bytes that the loader prog will use for read/write.
 */
struct loader_stack {
	__u32 btf_fd;
	__u32 prog_fd[MAX_USED_PROGS];
	__u32 inner_map_fd;
};

#define stack_off(field) \
	(__s16)(-sizeof(struct loader_stack) + offsetof(struct loader_stack, field))

#define attr_field(attr, field) (attr + offsetof(union bpf_attr, field))

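/* Grow the insn buffer by "size" bytes. On allocation failure, record
 * gen->error and free the buffer, turning subsequent emit()s into no-ops.
 */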
static int realloc_insn_buf(struct bpf_gen *gen, __u32 size)
{
	size_t off = gen->insn_cur - gen->insn_start;
	void *insn_start;

	if (gen->error)
		return gen->error;
	if (size > INT32_MAX || off + size > INT32_MAX) {
		gen->error = -ERANGE;
		return -ERANGE;
	}
	insn_start = realloc(gen->insn_start, off + size);
	if (!insn_start) {
		gen->error = -ENOMEM;
		free(gen->insn_start);
		gen->insn_start = NULL;
		return -ENOMEM;
	}
	gen->insn_start = insn_start;
	gen->insn_cur = insn_start + off;
	return 0;
}

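/* Same growth and error-latching strategy as realloc_insn_buf(), but for
 * the data blob.
 */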
static int realloc_data_buf(struct bpf_gen *gen, __u32 size)
{
	size_t off = gen->data_cur - gen->data_start;
	void *data_start;

	if (gen->error)
		return gen->error;
	if (size > INT32_MAX || off + size > INT32_MAX) {
		gen->error = -ERANGE;
		return -ERANGE;
	}
	data_start = realloc(gen->data_start, off + size);
	if (!data_start) {
		gen->error = -ENOMEM;
		free(gen->data_start);
		gen->data_start = NULL;
		return -ENOMEM;
	}
	gen->data_start = data_start;
	gen->data_cur = data_start + off;
	return 0;
}

static void emit(struct bpf_gen *gen, struct bpf_insn insn)
{
	if (realloc_insn_buf(gen, sizeof(insn)))
		return;
	memcpy(gen->insn_cur, &insn, sizeof(insn));
	gen->insn_cur += sizeof(insn);
}

static void emit2(struct bpf_gen *gen, struct bpf_insn insn1, struct bpf_insn insn2)
{
	emit(gen, insn1);
	emit(gen, insn2);
}

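/* A rough sketch of the expected call sequence when libbpf runs in
 * gen_loader mode (the exact call sites live in libbpf.c):
 *
 *	bpf_gen__init(gen, log_level);
 *	bpf_gen__load_btf(gen, raw_btf_data, raw_btf_size);
 *	bpf_gen__map_create(gen, ..., map_idx);   // once per map
 *	bpf_gen__prog_load(gen, ..., prog_idx);   // once per program
 *	err = bpf_gen__finish(gen);
 *
 * On success, gen->opts->insns/data hold the loader program and its blob;
 * bpf_load_and_run() in skel_internal.h executes them at load time.
 */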
void bpf_gen__init(struct bpf_gen *gen, int log_level)
{
	size_t stack_sz = sizeof(struct loader_stack);
	int i;

	gen->log_level = log_level;
	/* save ctx pointer into R6 */
	emit(gen, BPF_MOV64_REG(BPF_REG_6, BPF_REG_1));

	/* bzero stack */
	emit(gen, BPF_MOV64_REG(BPF_REG_1, BPF_REG_10));
	emit(gen, BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -stack_sz));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, stack_sz));
	emit(gen, BPF_MOV64_IMM(BPF_REG_3, 0));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel));

	/* jump over cleanup code */
	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0,
			      /* size of cleanup code below */
			      (stack_sz / 4) * 3 + 2));

	/* remember the label where all error branches will jump to */
	gen->cleanup_label = gen->insn_cur - gen->insn_start;
	/* emit cleanup code: close all temp FDs */
	for (i = 0; i < stack_sz; i += 4) {
		emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, -stack_sz + i));
		emit(gen, BPF_JMP_IMM(BPF_JSLE, BPF_REG_1, 0, 1));
		emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_close));
	}
	/* R7 contains the error code from sys_bpf. Copy it into R0 and exit. */
	emit(gen, BPF_MOV64_REG(BPF_REG_0, BPF_REG_7));
	emit(gen, BPF_EXIT_INSN());
}

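/* Append "size" bytes (or zeroes when data == NULL) to the blob, padded to
 * a multiple of 8, and return the offset of the copy from the start of the
 * blob. On allocation failure, 0 is returned and gen->error is set by
 * realloc_data_buf(), so callers may keep emitting unconditionally.
 */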
static int add_data(struct bpf_gen *gen, const void *data, __u32 size)
{
	__u32 size8 = roundup(size, 8);
	__u64 zero = 0;
	void *prev;

	if (realloc_data_buf(gen, size8))
		return 0;
	prev = gen->data_cur;
	if (data) {
		memcpy(gen->data_cur, data, size);
		memcpy(gen->data_cur + size, &zero, size8 - size);
	} else {
		memset(gen->data_cur, 0, size8);
	}
	gen->data_cur += size8;
	return prev - gen->data_start;
}

/* Get an index for a map_fd/btf_fd slot in the reserved fd_array, or in
 * blob data relative to the start of fd_array. The caller decides whether
 * the returned index is usable.
 */
static int add_map_fd(struct bpf_gen *gen)
{
	if (!gen->fd_array)
		gen->fd_array = add_data(gen, NULL, MAX_FD_ARRAY_SZ * sizeof(int));
	if (gen->nr_maps == MAX_USED_MAPS) {
		pr_warn("Total maps exceeds %d\n", MAX_USED_MAPS);
		gen->error = -E2BIG;
		return 0;
	}
	return gen->nr_maps++;
}

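/* Get a slot for a kfunc module BTF FD: the first MAX_KFUNC_DESCS
 * descriptors use the reserved fd_array region after the map FDs; any
 * overflow spills into freshly added blob data past fd_array.
 */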
static int add_kfunc_btf_fd(struct bpf_gen *gen)
{
	int cur;

	if (!gen->fd_array)
		gen->fd_array = add_data(gen, NULL, MAX_FD_ARRAY_SZ * sizeof(int));
	if (gen->nr_fd_array == MAX_KFUNC_DESCS) {
		cur = add_data(gen, NULL, sizeof(int));
		return (cur - gen->fd_array) / sizeof(int);
	}
	return MAX_USED_MAPS + gen->nr_fd_array++;
}

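/* Offset of fd_array[index] within the blob. */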
static int blob_fd_array_off(struct bpf_gen *gen, int index)
{
	return gen->fd_array + index * sizeof(int);
}

static int insn_bytes_to_bpf_size(__u32 sz)
{
	switch (sz) {
	case 8: return BPF_DW;
	case 4: return BPF_W;
	case 2: return BPF_H;
	case 1: return BPF_B;
	default: return -1;
	}
}

/* *(u64 *)(blob + off) = (u64)(void *)(blob + data) */
static void emit_rel_store(struct bpf_gen *gen, int off, int data)
{
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, data));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, off));
	emit(gen, BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0));
}

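/* *(uN *)(blob + off) = *(uN *)(blob + blob_off), where N is "size" bytes */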
static void move_blob2blob(struct bpf_gen *gen, int off, int size, int blob_off)
{
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_2, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, blob_off));
	emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_2, 0));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, off));
	emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_1, BPF_REG_0, 0));
}

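/* *(uN *)(ctx + ctx_off) = *(uN *)(blob + blob_off) */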
static void move_blob2ctx(struct bpf_gen *gen, int ctx_off, int size, int blob_off)
{
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, blob_off));
	emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_1, 0));
	emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_6, BPF_REG_0, ctx_off));
}

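/* *(uN *)(blob + off) = *(uN *)(ctx + ctx_off), optionally skipping the
 * store when the ctx value is zero
 */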
static void move_ctx2blob(struct bpf_gen *gen, int off, int size, int ctx_off,
			  bool check_non_zero)
{
	emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_6, ctx_off));
	if (check_non_zero)
		/* If value in ctx is zero don't update the blob.
		 * For example: when ctx->map.max_entries == 0, keep default max_entries from bpf.c
		 */
		emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, off));
	emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_1, BPF_REG_0, 0));
}

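/* *(uN *)(blob + off) = *(uN *)(stack + stack_off) */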
static void move_stack2blob(struct bpf_gen *gen, int off, int size, int stack_off)
{
	emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_10, stack_off));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, off));
	emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_1, BPF_REG_0, 0));
}

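/* *(uN *)(ctx + ctx_off) = *(uN *)(stack + stack_off) */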
static void move_stack2ctx(struct bpf_gen *gen, int ctx_off, int size, int stack_off)
{
	emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_10, stack_off));
	emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_6, BPF_REG_0, ctx_off));
}

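/* Emit "R7 = sys_bpf(cmd, blob + attr, attr_size)". */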
static void emit_sys_bpf(struct bpf_gen *gen, int cmd, int attr, int attr_size)
{
	emit(gen, BPF_MOV64_IMM(BPF_REG_1, cmd));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_2, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, attr));
	emit(gen, BPF_MOV64_IMM(BPF_REG_3, attr_size));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_bpf));
	/* remember the result in R7 */
	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
}

static bool is_simm16(__s64 value)
{
	return value == (__s64)(__s16)value;
}

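/* Emit "if (R7 < 0) goto cleanup". The backward offset is measured in
 * 8-byte insns from the insn that follows the jump, hence the division
 * by 8 and the extra -1.
 */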
static void emit_check_err(struct bpf_gen *gen)
{
	__s64 off = -(gen->insn_cur - gen->insn_start - gen->cleanup_label) / 8 - 1;

	/* R7 contains result of last sys_bpf command.
	 * if (R7 < 0) goto cleanup;
	 */
	if (is_simm16(off)) {
		emit(gen, BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0, off));
	} else {
		gen->error = -ERANGE;
		emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, -1));
	}
}

/* reg1 and reg2 should not be R1 - R5. They can be R0, R6 - R10 */
static void emit_debug(struct bpf_gen *gen, int reg1, int reg2,
		       const char *fmt, va_list args)
{
	char buf[1024];
	int addr, len, ret;

	if (!gen->log_level)
		return;
	ret = vsnprintf(buf, sizeof(buf), fmt, args);
	if (ret < 1024 - 7 && reg1 >= 0 && reg2 < 0)
		/* Special case to accommodate the common debug_ret():
		 * it avoids having to specify BPF_REG_7 and to append
		 * " r=%%d" to each print explicitly.
		 */
		strcat(buf, " r=%d");
	len = strlen(buf) + 1;
	addr = add_data(gen, buf, len);

	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, addr));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
	if (reg1 >= 0)
		emit(gen, BPF_MOV64_REG(BPF_REG_3, reg1));
	if (reg2 >= 0)
		emit(gen, BPF_MOV64_REG(BPF_REG_4, reg2));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_trace_printk));
}

static void debug_regs(struct bpf_gen *gen, int reg1, int reg2, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	emit_debug(gen, reg1, reg2, fmt, args);
	va_end(args);
}

static void debug_ret(struct bpf_gen *gen, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	emit_debug(gen, BPF_REG_7, -1, fmt, args);
	va_end(args);
}

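/* Emit "if (R1 > 0) { R9 = R1; sys_close(R1); }", followed by a debug
 * print of the closed FD and the result when logging is enabled.
 */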
static void __emit_sys_close(struct bpf_gen *gen)
{
	emit(gen, BPF_JMP_IMM(BPF_JSLE, BPF_REG_1, 0,
			      /* 2 is the number of the following insns,
			       * 6 is the number of additional insns emitted
			       * by debug_regs()
			       */
			      2 + (gen->log_level ? 6 : 0)));
	emit(gen, BPF_MOV64_REG(BPF_REG_9, BPF_REG_1));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_close));
	debug_regs(gen, BPF_REG_9, BPF_REG_0, "close(%%d) = %%d");
}

static void emit_sys_close_stack(struct bpf_gen *gen, int stack_off)
{
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, stack_off));
	__emit_sys_close(gen);
}

static void emit_sys_close_blob(struct bpf_gen *gen, int blob_off)
{
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, blob_off));
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0));
	__emit_sys_close(gen);
}

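/* Finalize the loader program: close the temporary btf_fd, copy all prog
 * and map FDs into the user-visible context, and hand the generated insns
 * and data blob back through gen->opts.
 */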
int bpf_gen__finish(struct bpf_gen *gen)
{
	int i;

	emit_sys_close_stack(gen, stack_off(btf_fd));
	for (i = 0; i < gen->nr_progs; i++)
		move_stack2ctx(gen,
			       sizeof(struct bpf_loader_ctx) +
			       sizeof(struct bpf_map_desc) * gen->nr_maps +
			       sizeof(struct bpf_prog_desc) * i +
			       offsetof(struct bpf_prog_desc, prog_fd), 4,
			       stack_off(prog_fd[i]));
	for (i = 0; i < gen->nr_maps; i++)
		move_blob2ctx(gen,
			      sizeof(struct bpf_loader_ctx) +
			      sizeof(struct bpf_map_desc) * i +
			      offsetof(struct bpf_map_desc, map_fd), 4,
			      blob_fd_array_off(gen, i));
	emit(gen, BPF_MOV64_IMM(BPF_REG_0, 0));
	emit(gen, BPF_EXIT_INSN());
	pr_debug("gen: finish %d\n", gen->error);
	if (!gen->error) {
		struct gen_loader_opts *opts = gen->opts;

		opts->insns = gen->insn_start;
		opts->insns_sz = gen->insn_cur - gen->insn_start;
		opts->data = gen->data_start;
		opts->data_sz = gen->data_cur - gen->data_start;
	}
	return gen->error;
}

void bpf_gen__free(struct bpf_gen *gen)
{
	if (!gen)
		return;
	free(gen->data_start);
	free(gen->insn_start);
	free(gen);
}

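/* Record a BTF_LOAD command: copy the raw BTF into the blob, wire up the
 * user's log buffer from the context, and stash the resulting btf_fd on
 * the loader stack for later map/prog creation.
 */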
void bpf_gen__load_btf(struct bpf_gen *gen, const void *btf_raw_data,
		       __u32 btf_raw_size)
{
	int attr_size = offsetofend(union bpf_attr, btf_log_level);
	int btf_data, btf_load_attr;
	union bpf_attr attr;

	memset(&attr, 0, attr_size);
	pr_debug("gen: load_btf: size %d\n", btf_raw_size);
	btf_data = add_data(gen, btf_raw_data, btf_raw_size);

	attr.btf_size = btf_raw_size;
	btf_load_attr = add_data(gen, &attr, attr_size);

	/* populate union bpf_attr with user provided log details */
	move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_level), 4,
		      offsetof(struct bpf_loader_ctx, log_level), false);
	move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_size), 4,
		      offsetof(struct bpf_loader_ctx, log_size), false);
	move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_buf), 8,
		      offsetof(struct bpf_loader_ctx, log_buf), false);
	/* populate union bpf_attr with a pointer to the BTF data */
	emit_rel_store(gen, attr_field(btf_load_attr, btf), btf_data);
	/* emit BTF_LOAD command */
	emit_sys_bpf(gen, BPF_BTF_LOAD, btf_load_attr, attr_size);
	debug_ret(gen, "btf_load size %d", btf_raw_size);
	emit_check_err(gen);
	/* remember btf_fd in the stack, if successful */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, stack_off(btf_fd)));
}

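/* Record a MAP_CREATE command. For maps tracked by libbpf (map_idx >= 0)
 * the new FD is stored in fd_array slot map_idx; for an inner map
 * (map_idx == -1) it is kept on the loader stack and closed once the
 * enclosing map-in-map has been created.
 */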
void bpf_gen__map_create(struct bpf_gen *gen,
			 struct bpf_create_map_params *map_attr, int map_idx)
{
	int attr_size = offsetofend(union bpf_attr, btf_vmlinux_value_type_id);
	bool close_inner_map_fd = false;
	int map_create_attr, idx;
	union bpf_attr attr;

	memset(&attr, 0, attr_size);
	attr.map_type = map_attr->map_type;
	attr.key_size = map_attr->key_size;
	attr.value_size = map_attr->value_size;
	attr.map_flags = map_attr->map_flags;
	attr.map_extra = map_attr->map_extra;
	memcpy(attr.map_name, map_attr->name,
	       min((unsigned)strlen(map_attr->name), BPF_OBJ_NAME_LEN - 1));
	attr.numa_node = map_attr->numa_node;
	attr.map_ifindex = map_attr->map_ifindex;
	attr.max_entries = map_attr->max_entries;
	switch (attr.map_type) {
	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
	case BPF_MAP_TYPE_CGROUP_ARRAY:
	case BPF_MAP_TYPE_STACK_TRACE:
	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
	case BPF_MAP_TYPE_HASH_OF_MAPS:
	case BPF_MAP_TYPE_DEVMAP:
	case BPF_MAP_TYPE_DEVMAP_HASH:
	case BPF_MAP_TYPE_CPUMAP:
	case BPF_MAP_TYPE_XSKMAP:
	case BPF_MAP_TYPE_SOCKMAP:
	case BPF_MAP_TYPE_SOCKHASH:
	case BPF_MAP_TYPE_QUEUE:
	case BPF_MAP_TYPE_STACK:
	case BPF_MAP_TYPE_RINGBUF:
		break;
	default:
		attr.btf_key_type_id = map_attr->btf_key_type_id;
		attr.btf_value_type_id = map_attr->btf_value_type_id;
	}

	pr_debug("gen: map_create: %s idx %d type %d value_type_id %d\n",
		 attr.map_name, map_idx, map_attr->map_type, attr.btf_value_type_id);

	map_create_attr = add_data(gen, &attr, attr_size);
	if (attr.btf_value_type_id)
		/* populate union bpf_attr with btf_fd saved in the stack earlier */
		move_stack2blob(gen, attr_field(map_create_attr, btf_fd), 4,
				stack_off(btf_fd));
	switch (attr.map_type) {
	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
	case BPF_MAP_TYPE_HASH_OF_MAPS:
		move_stack2blob(gen, attr_field(map_create_attr, inner_map_fd), 4,
				stack_off(inner_map_fd));
		close_inner_map_fd = true;
		break;
	default:
		break;
	}
	/* conditionally update max_entries */
	if (map_idx >= 0)
		move_ctx2blob(gen, attr_field(map_create_attr, max_entries), 4,
			      sizeof(struct bpf_loader_ctx) +
			      sizeof(struct bpf_map_desc) * map_idx +
			      offsetof(struct bpf_map_desc, max_entries),
			      true /* check that max_entries != 0 */);
	/* emit MAP_CREATE command */
	emit_sys_bpf(gen, BPF_MAP_CREATE, map_create_attr, attr_size);
	debug_ret(gen, "map_create %s idx %d type %d value_size %d value_btf_id %d",
		  attr.map_name, map_idx, map_attr->map_type, attr.value_size,
		  attr.btf_value_type_id);
	emit_check_err(gen);
	/* remember map_fd in the stack, if successful */
	if (map_idx < 0) {
		/* This bpf_gen__map_create() function is called with map_idx >= 0
		 * for all maps that libbpf loading logic tracks.
		 * It's called with -1 to create an inner map.
		 */
		emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7,
				      stack_off(inner_map_fd)));
	} else if (map_idx != gen->nr_maps) {
		gen->error = -EDOM; /* internal bug */
		return;
	} else {
		/* add_map_fd does gen->nr_maps++ */
		idx = add_map_fd(gen);
		emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
						 0, 0, 0, blob_fd_array_off(gen, idx)));
		emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_7, 0));
	}
	if (close_inner_map_fd)
		emit_sys_close_stack(gen, stack_off(inner_map_fd));
}

void bpf_gen__record_attach_target(struct bpf_gen *gen, const char *attach_name,
				   enum bpf_attach_type type)
{
	const char *prefix;
	int kind, ret;

	btf_get_kernel_prefix_kind(type, &prefix, &kind);
	gen->attach_kind = kind;
	ret = snprintf(gen->attach_target, sizeof(gen->attach_target), "%s%s",
		       prefix, attach_name);
	/* snprintf() returns the would-be length, so >= means truncation */
	if (ret >= sizeof(gen->attach_target))
		gen->error = -ENOSPC;
}

static void emit_find_attach_target(struct bpf_gen *gen)
{
	int name, len = strlen(gen->attach_target) + 1;

	pr_debug("gen: find_attach_tgt %s %d\n", gen->attach_target, gen->attach_kind);
	name = add_data(gen, gen->attach_target, len);

	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, name));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
	emit(gen, BPF_MOV64_IMM(BPF_REG_3, gen->attach_kind));
	emit(gen, BPF_MOV64_IMM(BPF_REG_4, 0));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_btf_find_by_name_kind));
	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
	debug_ret(gen, "find_by_name_kind(%s,%d)",
		  gen->attach_target, gen->attach_kind);
	emit_check_err(gen);
	/* if successful, btf_id is in lower 32-bit of R7 and
	 * btf_obj_fd is in upper 32-bit
	 */
}

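/* Remember an extern ksym/kfunc relocation; it is materialized later by
 * emit_relos() from bpf_gen__prog_load().
 */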
void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, bool is_weak,
			    bool is_typeless, int kind, int insn_idx)
{
	struct ksym_relo_desc *relo;

	relo = libbpf_reallocarray(gen->relos, gen->relo_cnt + 1, sizeof(*relo));
	if (!relo) {
		gen->error = -ENOMEM;
		return;
	}
	gen->relos = relo;
	relo += gen->relo_cnt;
	relo->name = name;
	relo->is_weak = is_weak;
	relo->is_typeless = is_typeless;
	relo->kind = kind;
	relo->insn_idx = insn_idx;
	gen->relo_cnt++;
}

/* returns existing ksym_desc with ref incremented, or inserts a new one */
static struct ksym_desc *get_ksym_desc(struct bpf_gen *gen, struct ksym_relo_desc *relo)
{
	struct ksym_desc *kdesc;

	for (int i = 0; i < gen->nr_ksyms; i++) {
		if (!strcmp(gen->ksyms[i].name, relo->name)) {
			gen->ksyms[i].ref++;
			return &gen->ksyms[i];
		}
	}
	kdesc = libbpf_reallocarray(gen->ksyms, gen->nr_ksyms + 1, sizeof(*kdesc));
	if (!kdesc) {
		gen->error = -ENOMEM;
		return NULL;
	}
	gen->ksyms = kdesc;
	kdesc = &gen->ksyms[gen->nr_ksyms++];
	kdesc->name = relo->name;
	kdesc->kind = relo->kind;
	kdesc->ref = 1;
	kdesc->off = 0;
	kdesc->insn = 0;
	return kdesc;
}

/* Overwrites BPF_REG_{0, 1, 2, 3, 4, 7}
 * Returns result in BPF_REG_7
 */
static void emit_bpf_find_by_name_kind(struct bpf_gen *gen, struct ksym_relo_desc *relo)
{
	int name_off, len = strlen(relo->name) + 1;

	name_off = add_data(gen, relo->name, len);
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, name_off));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
	emit(gen, BPF_MOV64_IMM(BPF_REG_3, relo->kind));
	emit(gen, BPF_MOV64_IMM(BPF_REG_4, 0));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_btf_find_by_name_kind));
	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
	debug_ret(gen, "find_by_name_kind(%s,%d)", relo->name, relo->kind);
}

/* Overwrites BPF_REG_{0, 1, 2, 3, 4, 7}
 * Returns result in BPF_REG_7
 * Returns u64 symbol addr in BPF_REG_9
 */
static void emit_bpf_kallsyms_lookup_name(struct bpf_gen *gen, struct ksym_relo_desc *relo)
{
	int name_off, len = strlen(relo->name) + 1, res_off;

	name_off = add_data(gen, relo->name, len);
	res_off = add_data(gen, NULL, 8); /* res is u64 */
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, name_off));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
	emit(gen, BPF_MOV64_IMM(BPF_REG_3, 0));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_4, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, res_off));
	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_4));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_kallsyms_lookup_name));
	emit(gen, BPF_LDX_MEM(BPF_DW, BPF_REG_9, BPF_REG_7, 0));
	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
	debug_ret(gen, "kallsyms_lookup_name(%s,%d)", relo->name, relo->kind);
}

/* Expects:
 * BPF_REG_8 - pointer to instruction
 *
 * We need to reuse the BTF fd for the same symbol, otherwise each
 * relocation would take a new index, while the kernel limits the total
 * number of kfunc BTFs to 256. For duplicate symbols, this would mean a
 * new BTF fd index per entry. By pairing the symbol name with the index,
 * we get the insn->imm, insn->off pairing that the kernel uses for
 * kfunc_tab, which becomes the effective limit even though all of them
 * may share the same index in fd_array (such that kfunc_btf_tab has 1
 * element).
 */
static void emit_relo_kfunc_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insn)
{
	struct ksym_desc *kdesc;
	int btf_fd_idx;

	kdesc = get_ksym_desc(gen, relo);
	if (!kdesc)
		return;
	/* try to copy from existing bpf_insn */
	if (kdesc->ref > 1) {
		move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
			       kdesc->insn + offsetof(struct bpf_insn, imm));
		move_blob2blob(gen, insn + offsetof(struct bpf_insn, off), 2,
			       kdesc->insn + offsetof(struct bpf_insn, off));
		goto log;
	}
	/* remember insn offset, so we can copy BTF ID and FD later */
	kdesc->insn = insn;
	emit_bpf_find_by_name_kind(gen, relo);
	if (!relo->is_weak)
		emit_check_err(gen);
	/* get index in fd_array to store BTF FD at */
	btf_fd_idx = add_kfunc_btf_fd(gen);
	if (btf_fd_idx > INT16_MAX) {
		pr_warn("BTF fd off %d for kfunc %s exceeds INT16_MAX, cannot process relocation\n",
			btf_fd_idx, relo->name);
		gen->error = -E2BIG;
		return;
	}
	kdesc->off = btf_fd_idx;
	/* set a default value for imm */
	emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, offsetof(struct bpf_insn, imm), 0));
	/* skip success case store if ret < 0 */
	emit(gen, BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0, 1));
	/* store btf_id into insn[insn_idx].imm */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7, offsetof(struct bpf_insn, imm)));
	/* load fd_array slot pointer */
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, blob_fd_array_off(gen, btf_fd_idx)));
	/* skip store of BTF fd if ret < 0 */
	emit(gen, BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0, 3));
	/* store BTF fd in slot */
	emit(gen, BPF_MOV64_REG(BPF_REG_9, BPF_REG_7));
	emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_9, 32));
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_9, 0));
	/* set a default value for off */
	emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), 0));
	/* skip insn->off store if ret < 0 */
	emit(gen, BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0, 2));
	/* skip if vmlinux BTF */
	emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_9, 0, 1));
	/* store index into insn[insn_idx].off */
	emit(gen, BPF_ST_MEM(BPF_H, BPF_REG_8, offsetof(struct bpf_insn, off), btf_fd_idx));
log:
	if (!gen->log_level)
		return;
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_8,
			      offsetof(struct bpf_insn, imm)));
	emit(gen, BPF_LDX_MEM(BPF_H, BPF_REG_9, BPF_REG_8,
			      offsetof(struct bpf_insn, off)));
	debug_regs(gen, BPF_REG_7, BPF_REG_9, " func (%s:count=%d): imm: %%d, off: %%d",
		   relo->name, kdesc->ref);
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, blob_fd_array_off(gen, kdesc->off)));
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_0, 0));
	debug_regs(gen, BPF_REG_9, -1, " func (%s:count=%d): btf_fd",
		   relo->name, kdesc->ref);
}

static void emit_ksym_relo_log(struct bpf_gen *gen, struct ksym_relo_desc *relo,
			       int ref)
{
	if (!gen->log_level)
		return;
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_8,
			      offsetof(struct bpf_insn, imm)));
	emit(gen, BPF_LDX_MEM(BPF_H, BPF_REG_9, BPF_REG_8, sizeof(struct bpf_insn) +
			      offsetof(struct bpf_insn, imm)));
	debug_regs(gen, BPF_REG_7, BPF_REG_9, " var t=%d w=%d (%s:count=%d): imm[0]: %%d, imm[1]: %%d",
		   relo->is_typeless, relo->is_weak, relo->name, ref);
	emit(gen, BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_8, offsetofend(struct bpf_insn, code)));
	debug_regs(gen, BPF_REG_9, -1, " var t=%d w=%d (%s:count=%d): insn.reg",
		   relo->is_typeless, relo->is_weak, relo->name, ref);
}

/* Expects:
 * BPF_REG_8 - pointer to instruction
 */
static void emit_relo_ksym_typeless(struct bpf_gen *gen,
				    struct ksym_relo_desc *relo, int insn)
{
	struct ksym_desc *kdesc;

	kdesc = get_ksym_desc(gen, relo);
	if (!kdesc)
		return;
	/* try to copy from existing ldimm64 insn */
	if (kdesc->ref > 1) {
		move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
			       kdesc->insn + offsetof(struct bpf_insn, imm));
		move_blob2blob(gen, insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 4,
			       kdesc->insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm));
		goto log;
	}
	/* remember insn offset, so we can copy ksym addr later */
	kdesc->insn = insn;
	/* skip typeless ksym_desc in fd closing loop in cleanup_relos */
	kdesc->typeless = true;
	emit_bpf_kallsyms_lookup_name(gen, relo);
	emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_7, -ENOENT, 1));
	emit_check_err(gen);
	/* store lower half of addr into insn[insn_idx].imm */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_9, offsetof(struct bpf_insn, imm)));
	/* store upper half of addr into insn[insn_idx + 1].imm */
	emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_9, 32));
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_9,
		      sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm)));
log:
	emit_ksym_relo_log(gen, relo, kdesc->ref);
}

static __u32 src_reg_mask(void)
{
#if defined(__LITTLE_ENDIAN_BITFIELD)
	return 0x0f; /* src_reg,dst_reg,... */
#elif defined(__BIG_ENDIAN_BITFIELD)
	return 0xf0; /* dst_reg,src_reg,... */
#else
#error "Unsupported bit endianness, cannot proceed"
#endif
}

/* Expects:
 * BPF_REG_8 - pointer to instruction
 */
static void emit_relo_ksym_btf(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insn)
{
	struct ksym_desc *kdesc;
	__u32 reg_mask;

	kdesc = get_ksym_desc(gen, relo);
	if (!kdesc)
		return;
	/* try to copy from existing ldimm64 insn */
	if (kdesc->ref > 1) {
		move_blob2blob(gen, insn + offsetof(struct bpf_insn, imm), 4,
			       kdesc->insn + offsetof(struct bpf_insn, imm));
		move_blob2blob(gen, insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 4,
			       kdesc->insn + sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm));
		emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_8, offsetof(struct bpf_insn, imm)));
		/* jump over src_reg adjustment if imm is not 0 */
		emit(gen, BPF_JMP_IMM(BPF_JNE, BPF_REG_9, 0, 3));
		goto clear_src_reg;
	}
	/* remember insn offset, so we can copy BTF ID and FD later */
	kdesc->insn = insn;
	emit_bpf_find_by_name_kind(gen, relo);
	if (!relo->is_weak)
		emit_check_err(gen);
	/* set default values as 0 */
	emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, offsetof(struct bpf_insn, imm), 0));
	emit(gen, BPF_ST_MEM(BPF_W, BPF_REG_8, sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm), 0));
	/* skip success case stores if ret < 0 */
	emit(gen, BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0, 4));
	/* store btf_id into insn[insn_idx].imm */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7, offsetof(struct bpf_insn, imm)));
	/* store btf_obj_fd into insn[insn_idx + 1].imm */
	emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 32));
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_8, BPF_REG_7,
			      sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm)));
	emit(gen, BPF_JMP_IMM(BPF_JSGE, BPF_REG_7, 0, 3));
clear_src_reg:
	/* clear bpf_object__relocate_data's src_reg assignment, otherwise we get a verifier failure */
	reg_mask = src_reg_mask();
	emit(gen, BPF_LDX_MEM(BPF_B, BPF_REG_9, BPF_REG_8, offsetofend(struct bpf_insn, code)));
	emit(gen, BPF_ALU32_IMM(BPF_AND, BPF_REG_9, reg_mask));
	emit(gen, BPF_STX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, offsetofend(struct bpf_insn, code)));

	emit_ksym_relo_log(gen, relo, kdesc->ref);
}

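/* Emit the relocation for one extern: point BPF_REG_8 at the target
 * instruction in the blob and dispatch on the BTF kind of the symbol.
 */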
static void emit_relo(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insns)
{
	int insn;

	pr_debug("gen: emit_relo (%d): %s at %d\n", relo->kind, relo->name, relo->insn_idx);
	insn = insns + sizeof(struct bpf_insn) * relo->insn_idx;
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_8, BPF_PSEUDO_MAP_IDX_VALUE, 0, 0, 0, insn));
	switch (relo->kind) {
	case BTF_KIND_VAR:
		if (relo->is_typeless)
			emit_relo_ksym_typeless(gen, relo, insn);
		else
			emit_relo_ksym_btf(gen, relo, insn);
		break;
	case BTF_KIND_FUNC:
		emit_relo_kfunc_btf(gen, relo, insn);
		break;
	default:
		pr_warn("Unknown relocation kind '%d'\n", relo->kind);
		gen->error = -EDOM;
		return;
	}
}

static void emit_relos(struct bpf_gen *gen, int insns)
{
	int i;

	for (i = 0; i < gen->relo_cnt; i++)
		emit_relo(gen, gen->relos + i, insns);
}

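/* Emit sys_close() for every module BTF FD and typed ksym FD recorded
 * during relocation, then drop the host-side bookkeeping.
 */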
static void cleanup_relos(struct bpf_gen *gen, int insns)
{
	int i, insn;

	for (i = 0; i < gen->nr_ksyms; i++) {
		/* only close fds for typed ksyms and kfuncs */
		if (gen->ksyms[i].kind == BTF_KIND_VAR && !gen->ksyms[i].typeless) {
			/* close fd recorded in insn[insn_idx + 1].imm */
			insn = gen->ksyms[i].insn;
			insn += sizeof(struct bpf_insn) + offsetof(struct bpf_insn, imm);
			emit_sys_close_blob(gen, insn);
		} else if (gen->ksyms[i].kind == BTF_KIND_FUNC) {
			emit_sys_close_blob(gen, blob_fd_array_off(gen, gen->ksyms[i].off));
			if (gen->ksyms[i].off < MAX_FD_ARRAY_SZ)
				gen->nr_fd_array--;
		}
	}
	if (gen->nr_ksyms) {
		free(gen->ksyms);
		gen->nr_ksyms = 0;
		gen->ksyms = NULL;
	}
	if (gen->relo_cnt) {
		free(gen->relos);
		gen->relo_cnt = 0;
		gen->relos = NULL;
	}
}

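/* Record a PROG_LOAD command: copy license, insns and func/line info into
 * the blob, patch in btf_fd, fd_array, the user's log buffer and (when an
 * attach target was recorded) the btf_id/btf_obj_fd found at load time,
 * emit all ksym/kfunc relocations, and stash the resulting prog_fd on the
 * loader stack.
 */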
void bpf_gen__prog_load(struct bpf_gen *gen,
			struct bpf_prog_load_params *load_attr, int prog_idx)
{
	int attr_size = offsetofend(union bpf_attr, fd_array);
	int prog_load_attr, license, insns, func_info, line_info;
	union bpf_attr attr;

	memset(&attr, 0, attr_size);
	pr_debug("gen: prog_load: type %d insns_cnt %zd\n",
		 load_attr->prog_type, load_attr->insn_cnt);
	/* add license string to blob of bytes */
	license = add_data(gen, load_attr->license, strlen(load_attr->license) + 1);
	/* add insns to blob of bytes */
	insns = add_data(gen, load_attr->insns,
			 load_attr->insn_cnt * sizeof(struct bpf_insn));

	attr.prog_type = load_attr->prog_type;
	attr.expected_attach_type = load_attr->expected_attach_type;
	attr.attach_btf_id = load_attr->attach_btf_id;
	attr.prog_ifindex = load_attr->prog_ifindex;
	attr.kern_version = 0;
	attr.insn_cnt = (__u32)load_attr->insn_cnt;
	attr.prog_flags = load_attr->prog_flags;

	attr.func_info_rec_size = load_attr->func_info_rec_size;
	attr.func_info_cnt = load_attr->func_info_cnt;
	func_info = add_data(gen, load_attr->func_info,
			     attr.func_info_cnt * attr.func_info_rec_size);

	attr.line_info_rec_size = load_attr->line_info_rec_size;
	attr.line_info_cnt = load_attr->line_info_cnt;
	line_info = add_data(gen, load_attr->line_info,
			     attr.line_info_cnt * attr.line_info_rec_size);

	memcpy(attr.prog_name, load_attr->name,
	       min((unsigned)strlen(load_attr->name), BPF_OBJ_NAME_LEN - 1));
	prog_load_attr = add_data(gen, &attr, attr_size);

	/* populate union bpf_attr with a pointer to license */
	emit_rel_store(gen, attr_field(prog_load_attr, license), license);

	/* populate union bpf_attr with a pointer to instructions */
	emit_rel_store(gen, attr_field(prog_load_attr, insns), insns);

	/* populate union bpf_attr with a pointer to func_info */
	emit_rel_store(gen, attr_field(prog_load_attr, func_info), func_info);

	/* populate union bpf_attr with a pointer to line_info */
	emit_rel_store(gen, attr_field(prog_load_attr, line_info), line_info);

	/* populate union bpf_attr fd_array with a pointer to data where map_fds are saved */
	emit_rel_store(gen, attr_field(prog_load_attr, fd_array), gen->fd_array);

	/* populate union bpf_attr with user provided log details */
	move_ctx2blob(gen, attr_field(prog_load_attr, log_level), 4,
		      offsetof(struct bpf_loader_ctx, log_level), false);
	move_ctx2blob(gen, attr_field(prog_load_attr, log_size), 4,
		      offsetof(struct bpf_loader_ctx, log_size), false);
	move_ctx2blob(gen, attr_field(prog_load_attr, log_buf), 8,
		      offsetof(struct bpf_loader_ctx, log_buf), false);
	/* populate union bpf_attr with btf_fd saved in the stack earlier */
	move_stack2blob(gen, attr_field(prog_load_attr, prog_btf_fd), 4,
			stack_off(btf_fd));
	if (gen->attach_kind) {
		emit_find_attach_target(gen);
		/* populate union bpf_attr with btf_id and btf_obj_fd found by helper */
		emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
						 0, 0, 0, prog_load_attr));
		emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7,
				      offsetof(union bpf_attr, attach_btf_id)));
		emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 32));
		emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7,
				      offsetof(union bpf_attr, attach_btf_obj_fd)));
	}
	emit_relos(gen, insns);
	/* emit PROG_LOAD command */
	emit_sys_bpf(gen, BPF_PROG_LOAD, prog_load_attr, attr_size);
	debug_ret(gen, "prog_load %s insn_cnt %d", attr.prog_name, attr.insn_cnt);
	/* successful or not, close btf module FDs used in extern ksyms and attach_btf_obj_fd */
	cleanup_relos(gen, insns);
	if (gen->attach_kind)
		emit_sys_close_blob(gen,
				    attr_field(prog_load_attr, attach_btf_obj_fd));
	emit_check_err(gen);
	/* remember prog_fd in the stack, if successful */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7,
			      stack_off(prog_fd[gen->nr_progs])));
	gen->nr_progs++;
}

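/* Record a MAP_UPDATE_ELEM command for key 0. The value is seeded from
 * pvalue and, if the user supplied an initial_value in the context, is
 * overwritten at load time via the copy_from_user() helper.
 */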
void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *pvalue,
			      __u32 value_size)
{
	int attr_size = offsetofend(union bpf_attr, flags);
	int map_update_attr, value, key;
	union bpf_attr attr;
	int zero = 0;

	memset(&attr, 0, attr_size);
	pr_debug("gen: map_update_elem: idx %d\n", map_idx);

	value = add_data(gen, pvalue, value_size);
	key = add_data(gen, &zero, sizeof(zero));

	/* if (map_desc[map_idx].initial_value)
	 *    copy_from_user(value, initial_value, value_size);
	 */
	emit(gen, BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6,
			      sizeof(struct bpf_loader_ctx) +
			      sizeof(struct bpf_map_desc) * map_idx +
			      offsetof(struct bpf_map_desc, initial_value)));
	emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0, 4));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, value));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, value_size));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_copy_from_user));

	map_update_attr = add_data(gen, &attr, attr_size);
	move_blob2blob(gen, attr_field(map_update_attr, map_fd), 4,
		       blob_fd_array_off(gen, map_idx));
	emit_rel_store(gen, attr_field(map_update_attr, key), key);
	emit_rel_store(gen, attr_field(map_update_attr, value), value);
	/* emit MAP_UPDATE_ELEM command */
	emit_sys_bpf(gen, BPF_MAP_UPDATE_ELEM, map_update_attr, attr_size);
	debug_ret(gen, "update_elem idx %d value_size %d", map_idx, value_size);
	emit_check_err(gen);
}

void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx)
{
	int attr_size = offsetofend(union bpf_attr, map_fd);
	int map_freeze_attr;
	union bpf_attr attr;

	memset(&attr, 0, attr_size);
	pr_debug("gen: map_freeze: idx %d\n", map_idx);
	map_freeze_attr = add_data(gen, &attr, attr_size);
	move_blob2blob(gen, attr_field(map_freeze_attr, map_fd), 4,
		       blob_fd_array_off(gen, map_idx));
	/* emit MAP_FREEZE command */
	emit_sys_bpf(gen, BPF_MAP_FREEZE, map_freeze_attr, attr_size);
	debug_ret(gen, "map_freeze");
	emit_check_err(gen);
}