xref: /openbmc/linux/tools/lib/bpf/gen_loader.c (revision f3956ebb)
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2021 Facebook */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <linux/filter.h>
#include <sys/param.h>
#include "btf.h"
#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"
#include "hashmap.h"
#include "bpf_gen_internal.h"
#include "skel_internal.h"

#define MAX_USED_MAPS 64
#define MAX_USED_PROGS 32

/* The following structure describes the stack layout of the loader program.
 * In addition, R6 contains the pointer to the context.
 * R7 contains the result of the last sys_bpf command (typically an error or FD).
 * R9 contains the result of the last sys_close command.
 *
 * Naming convention:
 * ctx - bpf program context
 * stack - bpf program stack
 * blob - bpf_attr-s, strings, insns, map data;
 *        all the bytes that the loader prog will use for read/write.
 */
struct loader_stack {
	__u32 btf_fd;
	__u32 map_fd[MAX_USED_MAPS];
	__u32 prog_fd[MAX_USED_PROGS];
	__u32 inner_map_fd;
};

#define stack_off(field) \
	(__s16)(-sizeof(struct loader_stack) + offsetof(struct loader_stack, field))

#define attr_field(attr, field) (attr + offsetof(union bpf_attr, field))

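/* stack_off() yields a negative offset from the frame pointer R10, since
 * the BPF stack grows down. For example, with MAX_USED_MAPS 64 and
 * MAX_USED_PROGS 32, sizeof(struct loader_stack) == 4 * (1 + 64 + 32 + 1)
 * == 392 bytes, so stack_off(btf_fd) == -392 and stack_off(map_fd[2]) ==
 * -392 + 4 + 2 * 4 == -380. attr_field() likewise converts the blob offset
 * of a "union bpf_attr" copy into the blob offset of one of its fields.
 */
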
static int realloc_insn_buf(struct bpf_gen *gen, __u32 size)
{
	size_t off = gen->insn_cur - gen->insn_start;
	void *insn_start;

	if (gen->error)
		return gen->error;
	if (size > INT32_MAX || off + size > INT32_MAX) {
		gen->error = -ERANGE;
		return -ERANGE;
	}
	insn_start = realloc(gen->insn_start, off + size);
	if (!insn_start) {
		gen->error = -ENOMEM;
		free(gen->insn_start);
		gen->insn_start = NULL;
		return -ENOMEM;
	}
	gen->insn_start = insn_start;
	gen->insn_cur = insn_start + off;
	return 0;
}

static int realloc_data_buf(struct bpf_gen *gen, __u32 size)
{
	size_t off = gen->data_cur - gen->data_start;
	void *data_start;

	if (gen->error)
		return gen->error;
	if (size > INT32_MAX || off + size > INT32_MAX) {
		gen->error = -ERANGE;
		return -ERANGE;
	}
	data_start = realloc(gen->data_start, off + size);
	if (!data_start) {
		gen->error = -ENOMEM;
		free(gen->data_start);
		gen->data_start = NULL;
		return -ENOMEM;
	}
	gen->data_start = data_start;
	gen->data_cur = data_start + off;
	return 0;
}

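/* Errors are "latched": once gen->error is set, every later emit*() and
 * add_data() call becomes a no-op, so the generation code below can run
 * straight through without checking each call; the first recorded error
 * is reported once, by bpf_gen__finish().
 */
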
static void emit(struct bpf_gen *gen, struct bpf_insn insn)
{
	if (realloc_insn_buf(gen, sizeof(insn)))
		return;
	memcpy(gen->insn_cur, &insn, sizeof(insn));
	gen->insn_cur += sizeof(insn);
}

static void emit2(struct bpf_gen *gen, struct bpf_insn insn1, struct bpf_insn insn2)
{
	emit(gen, insn1);
	emit(gen, insn2);
}

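/* bpf_gen__init() emits the loader prologue. The "bzero" relies on
 * bpf_probe_read_kernel() zero-filling the destination buffer when the
 * read from the NULL source address fails, which clears all FD slots on
 * the stack. The BPF_JA offset skips the cleanup code placed right after
 * it: 3 insns (load slot, test, sys_close) per 4-byte slot plus the
 * trailing mov/exit pair; with the 392-byte stack that is
 * 98 * 3 + 2 == 296 insns.
 */
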
void bpf_gen__init(struct bpf_gen *gen, int log_level)
{
	size_t stack_sz = sizeof(struct loader_stack);
	int i;

	gen->log_level = log_level;
	/* save ctx pointer into R6 */
	emit(gen, BPF_MOV64_REG(BPF_REG_6, BPF_REG_1));

	/* bzero stack */
	emit(gen, BPF_MOV64_REG(BPF_REG_1, BPF_REG_10));
	emit(gen, BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -stack_sz));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, stack_sz));
	emit(gen, BPF_MOV64_IMM(BPF_REG_3, 0));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel));

	/* jump over cleanup code */
	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0,
			      /* size of cleanup code below */
			      (stack_sz / 4) * 3 + 2));

	/* remember the label where all error branches will jump to */
	gen->cleanup_label = gen->insn_cur - gen->insn_start;
	/* emit cleanup code: close all temp FDs */
	for (i = 0; i < stack_sz; i += 4) {
		emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, -stack_sz + i));
		emit(gen, BPF_JMP_IMM(BPF_JSLE, BPF_REG_1, 0, 1));
		emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_close));
	}
	/* R7 contains the error code from sys_bpf. Copy it into R0 and exit. */
	emit(gen, BPF_MOV64_REG(BPF_REG_0, BPF_REG_7));
	emit(gen, BPF_EXIT_INSN());
}

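/* add_data() appends "size" bytes to the data blob, zero-padded up to an
 * 8-byte boundary, and returns the blob-relative offset of the copy. On
 * allocation failure it returns offset 0; the failure itself is latched
 * in gen->error and surfaced by bpf_gen__finish().
 */
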
static int add_data(struct bpf_gen *gen, const void *data, __u32 size)
{
	__u32 size8 = roundup(size, 8);
	__u64 zero = 0;
	void *prev;

	if (realloc_data_buf(gen, size8))
		return 0;
	prev = gen->data_cur;
	memcpy(gen->data_cur, data, size);
	gen->data_cur += size;
	memcpy(gen->data_cur, &zero, size8 - size);
	gen->data_cur += size8 - size;
	return prev - gen->data_start;
}

static int insn_bytes_to_bpf_size(__u32 sz)
{
	switch (sz) {
	case 8: return BPF_DW;
	case 4: return BPF_W;
	case 2: return BPF_H;
	case 1: return BPF_B;
	default: return -1;
	}
}

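/* The ld_imm64 insns emitted below use src_reg == BPF_PSEUDO_MAP_IDX_VALUE
 * with map index 0 and the blob offset in the second insn of the pair; at
 * load time the kernel rewrites each such pair into the absolute address
 * of (blob map value + offset). The whole blob is carried in a single map
 * that the skeleton passes through fd_array[0] when it loads the loader
 * program itself (see skel_internal.h).
 */
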
/* *(u64 *)(blob + off) = (u64)(void *)(blob + data) */
static void emit_rel_store(struct bpf_gen *gen, int off, int data)
{
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, data));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, off));
	emit(gen, BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0));
}

/* *(u64 *)(blob + off) = (u64)(void *)(%sp + stack_off) */
static void emit_rel_store_sp(struct bpf_gen *gen, int off, int stack_off)
{
	emit(gen, BPF_MOV64_REG(BPF_REG_0, BPF_REG_10));
	emit(gen, BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, stack_off));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, off));
	emit(gen, BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0));
}

static void move_ctx2blob(struct bpf_gen *gen, int off, int size, int ctx_off,
			  bool check_non_zero)
{
	emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_6, ctx_off));
	if (check_non_zero)
		/* If the value in ctx is zero, don't update the blob.
		 * For example: when ctx->map.max_entries == 0, keep the default
		 * max_entries from bpf.c.
		 */
		emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, off));
	emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_1, BPF_REG_0, 0));
}

static void move_stack2blob(struct bpf_gen *gen, int off, int size, int stack_off)
{
	emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_10, stack_off));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, off));
	emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_1, BPF_REG_0, 0));
}

static void move_stack2ctx(struct bpf_gen *gen, int ctx_off, int size, int stack_off)
{
	emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_10, stack_off));
	emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_6, BPF_REG_0, ctx_off));
}

static void emit_sys_bpf(struct bpf_gen *gen, int cmd, int attr, int attr_size)
{
	emit(gen, BPF_MOV64_IMM(BPF_REG_1, cmd));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_2, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, attr));
	emit(gen, BPF_MOV64_IMM(BPF_REG_3, attr_size));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_bpf));
	/* remember the result in R7 */
	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
}

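/* emit_check_err() computes the backward branch distance to the shared
 * cleanup code in insns: the byte distance back to cleanup_label, divided
 * by 8 (one insn), minus 1 because a BPF jump offset is relative to the
 * insn that follows the jump. E.g. if the jump sits 800 bytes past
 * cleanup_label, off == -800 / 8 - 1 == -101.
 */
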
static bool is_simm16(__s64 value)
{
	return value == (__s64)(__s16)value;
}

static void emit_check_err(struct bpf_gen *gen)
{
	__s64 off = -(gen->insn_cur - gen->insn_start - gen->cleanup_label) / 8 - 1;

	/* R7 contains the result of the last sys_bpf command.
	 * if (R7 < 0) goto cleanup;
	 */
	if (is_simm16(off)) {
		emit(gen, BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0, off));
	} else {
		gen->error = -ERANGE;
		emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, -1));
	}
}

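/* Debug output goes through bpf_trace_printk(). Format strings are written
 * with "%%d" so that the vsnprintf() pass below, which expands the
 * caller's own arguments, leaves a literal "%d" behind for trace_printk to
 * consume at run time; e.g. debug_regs(gen, BPF_REG_9, BPF_REG_0,
 * "close(%%d) = %%d") prints both register values when the loader runs.
 */
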
/* reg1 and reg2 should not be R1 - R5. They can be R0, R6 - R10 */
static void emit_debug(struct bpf_gen *gen, int reg1, int reg2,
		       const char *fmt, va_list args)
{
	char buf[1024];
	int addr, len, ret;

	if (!gen->log_level)
		return;
	ret = vsnprintf(buf, sizeof(buf), fmt, args);
	if (ret < 1024 - 7 && reg1 >= 0 && reg2 < 0)
		/* The special case to accommodate common debug_ret():
		 * to avoid specifying BPF_REG_7 and adding " r=%%d" to
		 * prints explicitly.
		 */
		strcat(buf, " r=%d");
	len = strlen(buf) + 1;
	addr = add_data(gen, buf, len);

	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, addr));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
	if (reg1 >= 0)
		emit(gen, BPF_MOV64_REG(BPF_REG_3, reg1));
	if (reg2 >= 0)
		emit(gen, BPF_MOV64_REG(BPF_REG_4, reg2));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_trace_printk));
}

static void debug_regs(struct bpf_gen *gen, int reg1, int reg2, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	emit_debug(gen, reg1, reg2, fmt, args);
	va_end(args);
}

static void debug_ret(struct bpf_gen *gen, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	emit_debug(gen, BPF_REG_7, -1, fmt, args);
	va_end(args);
}

static void __emit_sys_close(struct bpf_gen *gen)
{
	emit(gen, BPF_JMP_IMM(BPF_JSLE, BPF_REG_1, 0,
			      /* 2 is the number of the following insns;
			       * 6 is the number of additional insns emitted
			       * by debug_regs()
			       */
			      2 + (gen->log_level ? 6 : 0)));
	emit(gen, BPF_MOV64_REG(BPF_REG_9, BPF_REG_1));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_close));
	debug_regs(gen, BPF_REG_9, BPF_REG_0, "close(%%d) = %%d");
}

static void emit_sys_close_stack(struct bpf_gen *gen, int stack_off)
{
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, stack_off));
	__emit_sys_close(gen);
}

static void emit_sys_close_blob(struct bpf_gen *gen, int blob_off)
{
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, blob_off));
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0));
	__emit_sys_close(gen);
}

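/* bpf_gen__finish() emits the epilogue: it copies the prog and map FDs
 * that earlier commands stashed on the loader stack into the
 * bpf_map_desc / bpf_prog_desc slots of the context, so user space can
 * read them back, and then hands the finished insn and data buffers to
 * gen->opts.
 */
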
int bpf_gen__finish(struct bpf_gen *gen)
{
	int i;

	emit_sys_close_stack(gen, stack_off(btf_fd));
	for (i = 0; i < gen->nr_progs; i++)
		move_stack2ctx(gen,
			       sizeof(struct bpf_loader_ctx) +
			       sizeof(struct bpf_map_desc) * gen->nr_maps +
			       sizeof(struct bpf_prog_desc) * i +
			       offsetof(struct bpf_prog_desc, prog_fd), 4,
			       stack_off(prog_fd[i]));
	for (i = 0; i < gen->nr_maps; i++)
		move_stack2ctx(gen,
			       sizeof(struct bpf_loader_ctx) +
			       sizeof(struct bpf_map_desc) * i +
			       offsetof(struct bpf_map_desc, map_fd), 4,
			       stack_off(map_fd[i]));
	emit(gen, BPF_MOV64_IMM(BPF_REG_0, 0));
	emit(gen, BPF_EXIT_INSN());
	pr_debug("gen: finish %d\n", gen->error);
	if (!gen->error) {
		struct gen_loader_opts *opts = gen->opts;

		opts->insns = gen->insn_start;
		opts->insns_sz = gen->insn_cur - gen->insn_start;
		opts->data = gen->data_start;
		opts->data_sz = gen->data_cur - gen->data_start;
	}
	return gen->error;
}

void bpf_gen__free(struct bpf_gen *gen)
{
	if (!gen)
		return;
	free(gen->data_start);
	free(gen->insn_start);
	free(gen);
}

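/* Every bpf_gen__*() command below follows the same pattern: build a
 * template "union bpf_attr" at generation time, add_data() it into the
 * blob, emit instructions that patch the run-time-only fields (FDs,
 * pointers, log settings from ctx), then emit_sys_bpf() plus
 * emit_check_err() so any failure branches to the common cleanup code.
 */
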
void bpf_gen__load_btf(struct bpf_gen *gen, const void *btf_raw_data,
		       __u32 btf_raw_size)
{
	int attr_size = offsetofend(union bpf_attr, btf_log_level);
	int btf_data, btf_load_attr;
	union bpf_attr attr;

	memset(&attr, 0, attr_size);
	pr_debug("gen: load_btf: size %d\n", btf_raw_size);
	btf_data = add_data(gen, btf_raw_data, btf_raw_size);

	attr.btf_size = btf_raw_size;
	btf_load_attr = add_data(gen, &attr, attr_size);

	/* populate union bpf_attr with user provided log details */
	move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_level), 4,
		      offsetof(struct bpf_loader_ctx, log_level), false);
	move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_size), 4,
		      offsetof(struct bpf_loader_ctx, log_size), false);
	move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_buf), 8,
		      offsetof(struct bpf_loader_ctx, log_buf), false);
	/* populate union bpf_attr with a pointer to the BTF data */
	emit_rel_store(gen, attr_field(btf_load_attr, btf), btf_data);
	/* emit BTF_LOAD command */
	emit_sys_bpf(gen, BPF_BTF_LOAD, btf_load_attr, attr_size);
	debug_ret(gen, "btf_load size %d", btf_raw_size);
	emit_check_err(gen);
	/* remember btf_fd in the stack, if successful */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, stack_off(btf_fd)));
}

void bpf_gen__map_create(struct bpf_gen *gen,
			 struct bpf_create_map_attr *map_attr, int map_idx)
{
	int attr_size = offsetofend(union bpf_attr, btf_vmlinux_value_type_id);
	bool close_inner_map_fd = false;
	int map_create_attr;
	union bpf_attr attr;

	memset(&attr, 0, attr_size);
	attr.map_type = map_attr->map_type;
	attr.key_size = map_attr->key_size;
	attr.value_size = map_attr->value_size;
	attr.map_flags = map_attr->map_flags;
	memcpy(attr.map_name, map_attr->name,
	       min((unsigned)strlen(map_attr->name), BPF_OBJ_NAME_LEN - 1));
	attr.numa_node = map_attr->numa_node;
	attr.map_ifindex = map_attr->map_ifindex;
	attr.max_entries = map_attr->max_entries;
	switch (attr.map_type) {
	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
	case BPF_MAP_TYPE_CGROUP_ARRAY:
	case BPF_MAP_TYPE_STACK_TRACE:
	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
	case BPF_MAP_TYPE_HASH_OF_MAPS:
	case BPF_MAP_TYPE_DEVMAP:
	case BPF_MAP_TYPE_DEVMAP_HASH:
	case BPF_MAP_TYPE_CPUMAP:
	case BPF_MAP_TYPE_XSKMAP:
	case BPF_MAP_TYPE_SOCKMAP:
	case BPF_MAP_TYPE_SOCKHASH:
	case BPF_MAP_TYPE_QUEUE:
	case BPF_MAP_TYPE_STACK:
	case BPF_MAP_TYPE_RINGBUF:
		break;
	default:
		attr.btf_key_type_id = map_attr->btf_key_type_id;
		attr.btf_value_type_id = map_attr->btf_value_type_id;
	}

	pr_debug("gen: map_create: %s idx %d type %d value_type_id %d\n",
		 attr.map_name, map_idx, map_attr->map_type, attr.btf_value_type_id);

	map_create_attr = add_data(gen, &attr, attr_size);
	if (attr.btf_value_type_id)
		/* populate union bpf_attr with btf_fd saved in the stack earlier */
		move_stack2blob(gen, attr_field(map_create_attr, btf_fd), 4,
				stack_off(btf_fd));
	switch (attr.map_type) {
	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
	case BPF_MAP_TYPE_HASH_OF_MAPS:
		move_stack2blob(gen, attr_field(map_create_attr, inner_map_fd), 4,
				stack_off(inner_map_fd));
		close_inner_map_fd = true;
		break;
	default:
		break;
	}
	/* conditionally update max_entries */
	if (map_idx >= 0)
		move_ctx2blob(gen, attr_field(map_create_attr, max_entries), 4,
			      sizeof(struct bpf_loader_ctx) +
			      sizeof(struct bpf_map_desc) * map_idx +
			      offsetof(struct bpf_map_desc, max_entries),
			      true /* check that max_entries != 0 */);
	/* emit MAP_CREATE command */
	emit_sys_bpf(gen, BPF_MAP_CREATE, map_create_attr, attr_size);
	debug_ret(gen, "map_create %s idx %d type %d value_size %d value_btf_id %d",
		  attr.map_name, map_idx, map_attr->map_type, attr.value_size,
		  attr.btf_value_type_id);
	emit_check_err(gen);
	/* remember map_fd in the stack, if successful */
	if (map_idx < 0) {
		/* This bpf_gen__map_create() function is called with map_idx >= 0
		 * for all maps that libbpf loading logic tracks.
		 * It's called with -1 to create an inner map.
		 */
		emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7,
				      stack_off(inner_map_fd)));
	} else if (map_idx != gen->nr_maps) {
		gen->error = -EDOM; /* internal bug */
		return;
	} else {
		emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7,
				      stack_off(map_fd[map_idx])));
		gen->nr_maps++;
	}
	if (close_inner_map_fd)
		emit_sys_close_stack(gen, stack_off(inner_map_fd));
}

void bpf_gen__record_attach_target(struct bpf_gen *gen, const char *attach_name,
				   enum bpf_attach_type type)
{
	const char *prefix;
	int kind, ret;

	btf_get_kernel_prefix_kind(type, &prefix, &kind);
	gen->attach_kind = kind;
	ret = snprintf(gen->attach_target, sizeof(gen->attach_target), "%s%s",
		       prefix, attach_name);
	/* snprintf() returns the would-be length; >= size means truncation */
	if (ret >= sizeof(gen->attach_target))
		gen->error = -ENOSPC;
}

static void emit_find_attach_target(struct bpf_gen *gen)
{
	int name, len = strlen(gen->attach_target) + 1;

	pr_debug("gen: find_attach_tgt %s %d\n", gen->attach_target, gen->attach_kind);
	name = add_data(gen, gen->attach_target, len);

	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, name));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
	emit(gen, BPF_MOV64_IMM(BPF_REG_3, gen->attach_kind));
	emit(gen, BPF_MOV64_IMM(BPF_REG_4, 0));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_btf_find_by_name_kind));
	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
	debug_ret(gen, "find_by_name_kind(%s,%d)",
		  gen->attach_target, gen->attach_kind);
	emit_check_err(gen);
	/* if successful, btf_id is in the lower 32 bits of R7 and
	 * btf_obj_fd is in the upper 32 bits
	 */
}

void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, int kind,
			    int insn_idx)
{
	struct ksym_relo_desc *relo;

	relo = libbpf_reallocarray(gen->relos, gen->relo_cnt + 1, sizeof(*relo));
	if (!relo) {
		gen->error = -ENOMEM;
		return;
	}
	gen->relos = relo;
	relo += gen->relo_cnt;
	relo->name = name;
	relo->kind = kind;
	relo->insn_idx = insn_idx;
	gen->relo_cnt++;
}

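/* emit_relo() resolves one extern ksym at load time: it calls
 * bpf_btf_find_by_name_kind() and patches the resolved BTF id into
 * insn[insn_idx].imm. For a BTF_KIND_VAR extern (a typed ksym referenced
 * through a ld_imm64 insn pair) the btf_obj_fd from the upper 32 bits of
 * R7 is additionally stored into insn[insn_idx + 1].imm.
 */
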
static void emit_relo(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insns)
{
	int name, insn, len = strlen(relo->name) + 1;

	pr_debug("gen: emit_relo: %s at %d\n", relo->name, relo->insn_idx);
	name = add_data(gen, relo->name, len);

	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, name));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
	emit(gen, BPF_MOV64_IMM(BPF_REG_3, relo->kind));
	emit(gen, BPF_MOV64_IMM(BPF_REG_4, 0));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_btf_find_by_name_kind));
	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
	debug_ret(gen, "find_by_name_kind(%s,%d)", relo->name, relo->kind);
	emit_check_err(gen);
	/* store btf_id into insn[insn_idx].imm */
	insn = insns + sizeof(struct bpf_insn) * relo->insn_idx +
		offsetof(struct bpf_insn, imm);
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, insn));
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, 0));
	if (relo->kind == BTF_KIND_VAR) {
		/* store btf_obj_fd into insn[insn_idx + 1].imm */
		emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 32));
		emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7,
				      sizeof(struct bpf_insn)));
	}
}

static void emit_relos(struct bpf_gen *gen, int insns)
{
	int i;

	for (i = 0; i < gen->relo_cnt; i++)
		emit_relo(gen, gen->relos + i, insns);
}

static void cleanup_relos(struct bpf_gen *gen, int insns)
{
	int i, insn;

	for (i = 0; i < gen->relo_cnt; i++) {
		if (gen->relos[i].kind != BTF_KIND_VAR)
			continue;
		/* close fd recorded in insn[insn_idx + 1].imm */
		insn = insns +
			sizeof(struct bpf_insn) * (gen->relos[i].insn_idx + 1) +
			offsetof(struct bpf_insn, imm);
		emit_sys_close_blob(gen, insn);
	}
	if (gen->relo_cnt) {
		free(gen->relos);
		gen->relo_cnt = 0;
		gen->relos = NULL;
	}
}

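/* bpf_gen__prog_load() stages everything BPF_PROG_LOAD needs in the blob
 * (license, insns, func/line info, the attr itself), patches the run-time
 * values in, then issues the command. Note that fd_array is pointed at
 * the map_fd[] slots on the loader stack, so insns that reference maps by
 * index resolve against the FDs created earlier by bpf_gen__map_create().
 */
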
void bpf_gen__prog_load(struct bpf_gen *gen,
			struct bpf_prog_load_params *load_attr, int prog_idx)
{
	int attr_size = offsetofend(union bpf_attr, fd_array);
	int prog_load_attr, license, insns, func_info, line_info;
	union bpf_attr attr;

	memset(&attr, 0, attr_size);
	pr_debug("gen: prog_load: type %d insns_cnt %zd\n",
		 load_attr->prog_type, load_attr->insn_cnt);
	/* add license string to blob of bytes */
	license = add_data(gen, load_attr->license, strlen(load_attr->license) + 1);
	/* add insns to blob of bytes */
	insns = add_data(gen, load_attr->insns,
			 load_attr->insn_cnt * sizeof(struct bpf_insn));

	attr.prog_type = load_attr->prog_type;
	attr.expected_attach_type = load_attr->expected_attach_type;
	attr.attach_btf_id = load_attr->attach_btf_id;
	attr.prog_ifindex = load_attr->prog_ifindex;
	attr.kern_version = 0;
	attr.insn_cnt = (__u32)load_attr->insn_cnt;
	attr.prog_flags = load_attr->prog_flags;

	attr.func_info_rec_size = load_attr->func_info_rec_size;
	attr.func_info_cnt = load_attr->func_info_cnt;
	func_info = add_data(gen, load_attr->func_info,
			     attr.func_info_cnt * attr.func_info_rec_size);

	attr.line_info_rec_size = load_attr->line_info_rec_size;
	attr.line_info_cnt = load_attr->line_info_cnt;
	line_info = add_data(gen, load_attr->line_info,
			     attr.line_info_cnt * attr.line_info_rec_size);

	memcpy(attr.prog_name, load_attr->name,
	       min((unsigned)strlen(load_attr->name), BPF_OBJ_NAME_LEN - 1));
	prog_load_attr = add_data(gen, &attr, attr_size);

	/* populate union bpf_attr with a pointer to license */
	emit_rel_store(gen, attr_field(prog_load_attr, license), license);

	/* populate union bpf_attr with a pointer to instructions */
	emit_rel_store(gen, attr_field(prog_load_attr, insns), insns);

	/* populate union bpf_attr with a pointer to func_info */
	emit_rel_store(gen, attr_field(prog_load_attr, func_info), func_info);

	/* populate union bpf_attr with a pointer to line_info */
	emit_rel_store(gen, attr_field(prog_load_attr, line_info), line_info);

	/* populate union bpf_attr fd_array with a pointer to stack where map_fds are saved */
	emit_rel_store_sp(gen, attr_field(prog_load_attr, fd_array),
			  stack_off(map_fd[0]));

	/* populate union bpf_attr with user provided log details */
	move_ctx2blob(gen, attr_field(prog_load_attr, log_level), 4,
		      offsetof(struct bpf_loader_ctx, log_level), false);
	move_ctx2blob(gen, attr_field(prog_load_attr, log_size), 4,
		      offsetof(struct bpf_loader_ctx, log_size), false);
	move_ctx2blob(gen, attr_field(prog_load_attr, log_buf), 8,
		      offsetof(struct bpf_loader_ctx, log_buf), false);
	/* populate union bpf_attr with btf_fd saved in the stack earlier */
	move_stack2blob(gen, attr_field(prog_load_attr, prog_btf_fd), 4,
			stack_off(btf_fd));
	if (gen->attach_kind) {
		emit_find_attach_target(gen);
		/* populate union bpf_attr with btf_id and btf_obj_fd found by helper */
		emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
						 0, 0, 0, prog_load_attr));
		emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7,
				      offsetof(union bpf_attr, attach_btf_id)));
		emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 32));
		emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7,
				      offsetof(union bpf_attr, attach_btf_obj_fd)));
	}
	emit_relos(gen, insns);
	/* emit PROG_LOAD command */
	emit_sys_bpf(gen, BPF_PROG_LOAD, prog_load_attr, attr_size);
	debug_ret(gen, "prog_load %s insn_cnt %d", attr.prog_name, attr.insn_cnt);
	/* successful or not, close btf module FDs used in extern ksyms and attach_btf_obj_fd */
	cleanup_relos(gen, insns);
	if (gen->attach_kind)
		emit_sys_close_blob(gen,
				    attr_field(prog_load_attr, attach_btf_obj_fd));
	emit_check_err(gen);
	/* remember prog_fd in the stack, if successful */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7,
			      stack_off(prog_fd[gen->nr_progs])));
	gen->nr_progs++;
}

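/* bpf_gen__map_update_elem() pre-seeds map element 0 with the data known
 * at generation time, then lets the loader overwrite it at run time via
 * bpf_copy_from_user() when the skeleton supplies a non-NULL
 * map_desc[map_idx].initial_value pointer (typically the .data/.rodata
 * contents user space may have modified before load).
 */
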
void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *pvalue,
			      __u32 value_size)
{
	int attr_size = offsetofend(union bpf_attr, flags);
	int map_update_attr, value, key;
	union bpf_attr attr;
	int zero = 0;

	memset(&attr, 0, attr_size);
	pr_debug("gen: map_update_elem: idx %d\n", map_idx);

	value = add_data(gen, pvalue, value_size);
	key = add_data(gen, &zero, sizeof(zero));

	/* if (map_desc[map_idx].initial_value)
	 *    copy_from_user(value, initial_value, value_size);
	 */
	emit(gen, BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6,
			      sizeof(struct bpf_loader_ctx) +
			      sizeof(struct bpf_map_desc) * map_idx +
			      offsetof(struct bpf_map_desc, initial_value)));
	emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0, 4));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, value));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, value_size));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_copy_from_user));

	map_update_attr = add_data(gen, &attr, attr_size);
	move_stack2blob(gen, attr_field(map_update_attr, map_fd), 4,
			stack_off(map_fd[map_idx]));
	emit_rel_store(gen, attr_field(map_update_attr, key), key);
	emit_rel_store(gen, attr_field(map_update_attr, value), value);
	/* emit MAP_UPDATE_ELEM command */
	emit_sys_bpf(gen, BPF_MAP_UPDATE_ELEM, map_update_attr, attr_size);
	debug_ret(gen, "update_elem idx %d value_size %d", map_idx, value_size);
	emit_check_err(gen);
}

void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx)
{
	int attr_size = offsetofend(union bpf_attr, map_fd);
	int map_freeze_attr;
	union bpf_attr attr;

	memset(&attr, 0, attr_size);
	pr_debug("gen: map_freeze: idx %d\n", map_idx);
	map_freeze_attr = add_data(gen, &attr, attr_size);
	move_stack2blob(gen, attr_field(map_freeze_attr, map_fd), 4,
			stack_off(map_fd[map_idx]));
	/* emit MAP_FREEZE command */
	emit_sys_bpf(gen, BPF_MAP_FREEZE, map_freeze_attr, attr_size);
	debug_ret(gen, "map_freeze");
	emit_check_err(gen);
}
735