// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
/* Copyright (c) 2021 Facebook */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <linux/filter.h>
#include "btf.h"
#include "bpf.h"
#include "libbpf.h"
#include "libbpf_internal.h"
#include "hashmap.h"
#include "bpf_gen_internal.h"
#include "skel_internal.h"

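/* This file generates a "loader program": a small BPF program that replays
 * the sys_bpf() commands needed to load BTF, create maps, and load programs
 * for a light skeleton.  A rough sketch of the sequence that libbpf drives
 * (illustrative, not exhaustive):
 *
 *	bpf_gen__init(gen, log_level);
 *	bpf_gen__load_btf(gen, btf_data, btf_size);
 *	bpf_gen__map_create(gen, &map_attr, map_idx);	- once per map
 *	bpf_gen__map_update_elem(gen, map_idx, value, value_size);
 *	bpf_gen__map_freeze(gen, map_idx);		- read-only maps
 *	bpf_gen__prog_load(gen, &load_attr, prog_idx);	- once per program
 *	err = bpf_gen__finish(gen);	- hands insns/data back via gen->opts
 */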
#define MAX_USED_MAPS 64
#define MAX_USED_PROGS 32

/* The following structure describes the stack layout of the loader program.
 * In addition, R6 contains the pointer to the context.
 * R7 contains the result of the last sys_bpf command (typically an error or an FD).
 * R9 contains the result of the last sys_close command.
 *
 * Naming convention:
 * ctx - bpf program context
 * stack - bpf program stack
 * blob - bpf_attr-s, strings, insns, map data.
 *        All the bytes that the loader prog will use for reads and writes.
 */
struct loader_stack {
	__u32 btf_fd;
	__u32 map_fd[MAX_USED_MAPS];
	__u32 prog_fd[MAX_USED_PROGS];
	__u32 inner_map_fd;
};

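/* stack_off() yields the (negative) offset of a loader_stack field from R10,
 * the BPF frame pointer, since the stack grows down from R10.  attr_field()
 * yields the blob offset of a union bpf_attr member, given the blob offset
 * of the attr itself.
 */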
#define stack_off(field) \
	(__s16)(-sizeof(struct loader_stack) + offsetof(struct loader_stack, field))

#define attr_field(attr, field) (attr + offsetof(union bpf_attr, field))

static int realloc_insn_buf(struct bpf_gen *gen, __u32 size)
{
	size_t off = gen->insn_cur - gen->insn_start;
	void *insn_start;

	if (gen->error)
		return gen->error;
	if (size > INT32_MAX || off + size > INT32_MAX) {
		gen->error = -ERANGE;
		return -ERANGE;
	}
	insn_start = realloc(gen->insn_start, off + size);
	if (!insn_start) {
		gen->error = -ENOMEM;
		free(gen->insn_start);
		gen->insn_start = NULL;
		return -ENOMEM;
	}
	gen->insn_start = insn_start;
	gen->insn_cur = insn_start + off;
	return 0;
}

static int realloc_data_buf(struct bpf_gen *gen, __u32 size)
{
	size_t off = gen->data_cur - gen->data_start;
	void *data_start;

	if (gen->error)
		return gen->error;
	if (size > INT32_MAX || off + size > INT32_MAX) {
		gen->error = -ERANGE;
		return -ERANGE;
	}
	data_start = realloc(gen->data_start, off + size);
	if (!data_start) {
		gen->error = -ENOMEM;
		free(gen->data_start);
		gen->data_start = NULL;
		return -ENOMEM;
	}
	gen->data_start = data_start;
	gen->data_cur = data_start + off;
	return 0;
}

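/* Append one instruction to the loader program.  Allocation failures are
 * latched in gen->error, so callers can emit unconditionally and check the
 * error once at the end.
 */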
static void emit(struct bpf_gen *gen, struct bpf_insn insn)
{
	if (realloc_insn_buf(gen, sizeof(insn)))
		return;
	memcpy(gen->insn_cur, &insn, sizeof(insn));
	gen->insn_cur += sizeof(insn);
}

static void emit2(struct bpf_gen *gen, struct bpf_insn insn1, struct bpf_insn insn2)
{
	emit(gen, insn1);
	emit(gen, insn2);
}

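/* Emit the loader program's prologue: save ctx, zero the scratch stack, and
 * lay down the shared cleanup code that every emit_check_err() branches to.
 * The cleanup code walks the stack in 4-byte steps, closing every slot that
 * holds a positive FD; roughly, for each slot:
 *
 *	if (*(u32 *)(R10 + off) > 0)
 *		sys_close(*(u32 *)(R10 + off));
 */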
void bpf_gen__init(struct bpf_gen *gen, int log_level)
{
	size_t stack_sz = sizeof(struct loader_stack);
	int i;

	gen->log_level = log_level;
	/* save ctx pointer into R6 */
	emit(gen, BPF_MOV64_REG(BPF_REG_6, BPF_REG_1));

	/* bzero stack */
	emit(gen, BPF_MOV64_REG(BPF_REG_1, BPF_REG_10));
	emit(gen, BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -stack_sz));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, stack_sz));
	emit(gen, BPF_MOV64_IMM(BPF_REG_3, 0));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_probe_read_kernel));

	/* jump over the cleanup code */
	emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0,
			      /* size of the cleanup code below:
			       * 3 insns per 4-byte stack slot, plus mov + exit
			       */
			      (stack_sz / 4) * 3 + 2));

	/* remember the label that all error branches will jump to */
	gen->cleanup_label = gen->insn_cur - gen->insn_start;
	/* emit cleanup code: close all temp FDs */
	for (i = 0; i < stack_sz; i += 4) {
		emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, -stack_sz + i));
		emit(gen, BPF_JMP_IMM(BPF_JSLE, BPF_REG_1, 0, 1));
		emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_close));
	}
	/* R7 contains the error code from sys_bpf. Copy it into R0 and exit. */
	emit(gen, BPF_MOV64_REG(BPF_REG_0, BPF_REG_7));
	emit(gen, BPF_EXIT_INSN());
}

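/* Copy 'size' bytes into the data blob and return their blob offset.  On
 * allocation failure the return value is meaningless (0), but gen->error is
 * set, so the generated program is discarded in bpf_gen__finish() anyway.
 */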
static int add_data(struct bpf_gen *gen, const void *data, __u32 size)
{
	void *prev;

	if (realloc_data_buf(gen, size))
		return 0;
	prev = gen->data_cur;
	memcpy(gen->data_cur, data, size);
	gen->data_cur += size;
	return prev - gen->data_start;
}

static int insn_bytes_to_bpf_size(__u32 sz)
{
	switch (sz) {
	case 8: return BPF_DW;
	case 4: return BPF_W;
	case 2: return BPF_H;
	case 1: return BPF_B;
	default: return -1;
	}
}

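/* The ld_imm64 insns below use BPF_PSEUDO_MAP_IDX_VALUE: at load time the
 * kernel rewrites them into the runtime address of the value of the map at
 * fd_array index 0 (the blob) plus the given offset, which is how the
 * loader prog obtains pointers into its own blob.
 */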
/* *(u64 *)(blob + off) = (u64)(void *)(blob + data) */
static void emit_rel_store(struct bpf_gen *gen, int off, int data)
{
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, data));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, off));
	emit(gen, BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0));
}

/* *(u64 *)(blob + off) = (u64)(void *)(%sp + stack_off) */
static void emit_rel_store_sp(struct bpf_gen *gen, int off, int stack_off)
{
	emit(gen, BPF_MOV64_REG(BPF_REG_0, BPF_REG_10));
	emit(gen, BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, stack_off));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, off));
	emit(gen, BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0));
}

static void move_ctx2blob(struct bpf_gen *gen, int off, int size, int ctx_off,
			  bool check_non_zero)
{
	emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_6, ctx_off));
	if (check_non_zero)
		/* If the value in ctx is zero, don't update the blob.
		 * For example: when ctx->map.max_entries == 0, keep the
		 * default max_entries from bpf.c.
		 */
		emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 3));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, off));
	emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_1, BPF_REG_0, 0));
}

static void move_stack2blob(struct bpf_gen *gen, int off, int size, int stack_off)
{
	emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_10, stack_off));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, off));
	emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_1, BPF_REG_0, 0));
}

static void move_stack2ctx(struct bpf_gen *gen, int ctx_off, int size, int stack_off)
{
	emit(gen, BPF_LDX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_0, BPF_REG_10, stack_off));
	emit(gen, BPF_STX_MEM(insn_bytes_to_bpf_size(size), BPF_REG_6, BPF_REG_0, ctx_off));
}

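/* Emit sys_bpf(cmd, blob + attr, attr_size) and latch the syscall result in
 * R7, where emit_check_err() and debug_ret() expect to find it.
 */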
static void emit_sys_bpf(struct bpf_gen *gen, int cmd, int attr, int attr_size)
{
	emit(gen, BPF_MOV64_IMM(BPF_REG_1, cmd));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_2, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, attr));
	emit(gen, BPF_MOV64_IMM(BPF_REG_3, attr_size));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_bpf));
	/* remember the result in R7 */
	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
}

static bool is_simm16(__s64 value)
{
	return value == (__s64)(__s16)value;
}

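/* Emit "if (R7 < 0) goto cleanup".  The jump offset is the backward distance
 * from the next insn to cleanup_label, counted in 8-byte insns, hence the
 * divide by 8 and the -1.  If it doesn't fit in the s16 jump offset, record
 * the error and emit a self-jump placeholder; gen->error ensures the
 * generated program is never actually loaded.
 */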
static void emit_check_err(struct bpf_gen *gen)
{
	__s64 off = -(gen->insn_cur - gen->insn_start - gen->cleanup_label) / 8 - 1;

	/* R7 contains result of last sys_bpf command.
	 * if (R7 < 0) goto cleanup;
	 */
	if (is_simm16(off)) {
		emit(gen, BPF_JMP_IMM(BPF_JSLT, BPF_REG_7, 0, off));
	} else {
		gen->error = -ERANGE;
		emit(gen, BPF_JMP_IMM(BPF_JA, 0, 0, -1));
	}
}

/* reg1 and reg2 should not be R1 - R5. They can be R0, R6 - R10 */
static void emit_debug(struct bpf_gen *gen, int reg1, int reg2,
		       const char *fmt, va_list args)
{
	char buf[1024];
	int addr, len, ret;

	if (!gen->log_level)
		return;
	ret = vsnprintf(buf, sizeof(buf), fmt, args);
	if (ret < 1024 - 7 && reg1 >= 0 && reg2 < 0)
		/* Special case to accommodate the common debug_ret():
		 * avoid having to specify BPF_REG_7 and append " r=%%d"
		 * to every format string explicitly.  The 7 reserves room
		 * for " r=%d" plus the NUL terminator.
		 */
		strcat(buf, " r=%d");
	len = strlen(buf) + 1;
	addr = add_data(gen, buf, len);

	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, addr));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
	if (reg1 >= 0)
		emit(gen, BPF_MOV64_REG(BPF_REG_3, reg1));
	if (reg2 >= 0)
		emit(gen, BPF_MOV64_REG(BPF_REG_4, reg2));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_trace_printk));
}

static void debug_regs(struct bpf_gen *gen, int reg1, int reg2, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	emit_debug(gen, reg1, reg2, fmt, args);
	va_end(args);
}

static void debug_ret(struct bpf_gen *gen, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	emit_debug(gen, BPF_REG_7, -1, fmt, args);
	va_end(args);
}

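/* Emit "if (R1 > 0) { R9 = R1; sys_close(R1); }", i.e. close the FD in R1
 * unless it is zero or negative.  When logging is on, the conditional jump
 * also has to skip the insns emitted by debug_regs().
 */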
static void __emit_sys_close(struct bpf_gen *gen)
{
	emit(gen, BPF_JMP_IMM(BPF_JSLE, BPF_REG_1, 0,
			      /* 2 is the number of the following insns,
			       * 6 is the number of additional insns
			       * emitted by debug_regs()
			       */
			      2 + (gen->log_level ? 6 : 0)));
	emit(gen, BPF_MOV64_REG(BPF_REG_9, BPF_REG_1));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_sys_close));
	debug_regs(gen, BPF_REG_9, BPF_REG_0, "close(%%d) = %%d");
}

static void emit_sys_close_stack(struct bpf_gen *gen, int stack_off)
{
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_10, stack_off));
	__emit_sys_close(gen);
}

static void emit_sys_close_blob(struct bpf_gen *gen, int blob_off)
{
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, blob_off));
	emit(gen, BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0));
	__emit_sys_close(gen);
}

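/* Emit the loader program's epilogue: close the temporary btf_fd and copy
 * the map and prog FDs from the scratch stack into the skeleton's context,
 * which is laid out as a bpf_loader_ctx followed by nr_maps bpf_map_desc-s
 * and nr_progs bpf_prog_desc-s.  On success, hand the generated insns and
 * data blob to the caller via gen->opts.
 */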
int bpf_gen__finish(struct bpf_gen *gen)
{
	int i;

	emit_sys_close_stack(gen, stack_off(btf_fd));
	for (i = 0; i < gen->nr_progs; i++)
		move_stack2ctx(gen,
			       sizeof(struct bpf_loader_ctx) +
			       sizeof(struct bpf_map_desc) * gen->nr_maps +
			       sizeof(struct bpf_prog_desc) * i +
			       offsetof(struct bpf_prog_desc, prog_fd), 4,
			       stack_off(prog_fd[i]));
	for (i = 0; i < gen->nr_maps; i++)
		move_stack2ctx(gen,
			       sizeof(struct bpf_loader_ctx) +
			       sizeof(struct bpf_map_desc) * i +
			       offsetof(struct bpf_map_desc, map_fd), 4,
			       stack_off(map_fd[i]));
	emit(gen, BPF_MOV64_IMM(BPF_REG_0, 0));
	emit(gen, BPF_EXIT_INSN());
	pr_debug("gen: finish %d\n", gen->error);
	if (!gen->error) {
		struct gen_loader_opts *opts = gen->opts;

		opts->insns = gen->insn_start;
		opts->insns_sz = gen->insn_cur - gen->insn_start;
		opts->data = gen->data_start;
		opts->data_sz = gen->data_cur - gen->data_start;
	}
	return gen->error;
}

void bpf_gen__free(struct bpf_gen *gen)
{
	if (!gen)
		return;
	free(gen->data_start);
	free(gen->insn_start);
	free(gen);
}

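/* Record a BTF_LOAD command: the raw BTF bytes go into the blob, the attr's
 * log fields are filled in at runtime from the user-supplied ctx, and the
 * resulting btf_fd is saved on the loader's stack for later map and prog
 * loads.
 */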
void bpf_gen__load_btf(struct bpf_gen *gen, const void *btf_raw_data,
		       __u32 btf_raw_size)
{
	int attr_size = offsetofend(union bpf_attr, btf_log_level);
	int btf_data, btf_load_attr;
	union bpf_attr attr;

	memset(&attr, 0, attr_size);
	pr_debug("gen: load_btf: size %d\n", btf_raw_size);
	btf_data = add_data(gen, btf_raw_data, btf_raw_size);

	attr.btf_size = btf_raw_size;
	btf_load_attr = add_data(gen, &attr, attr_size);

	/* populate union bpf_attr with user provided log details */
	move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_level), 4,
		      offsetof(struct bpf_loader_ctx, log_level), false);
	move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_size), 4,
		      offsetof(struct bpf_loader_ctx, log_size), false);
	move_ctx2blob(gen, attr_field(btf_load_attr, btf_log_buf), 8,
		      offsetof(struct bpf_loader_ctx, log_buf), false);
	/* populate union bpf_attr with a pointer to the BTF data */
	emit_rel_store(gen, attr_field(btf_load_attr, btf), btf_data);
	/* emit BTF_LOAD command */
	emit_sys_bpf(gen, BPF_BTF_LOAD, btf_load_attr, attr_size);
	debug_ret(gen, "btf_load size %d", btf_raw_size);
	emit_check_err(gen);
	/* remember btf_fd in the stack, if successful */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7, stack_off(btf_fd)));
}

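/* Record a MAP_CREATE command.  Most of the attr is known at generation
 * time and is copied into the blob verbatim; btf_fd, inner_map_fd and (for
 * maps tracked by libbpf) max_entries are patched in at runtime from the
 * stack or the ctx before the sys_bpf call is made.
 */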
void bpf_gen__map_create(struct bpf_gen *gen,
			 struct bpf_create_map_attr *map_attr, int map_idx)
{
	int attr_size = offsetofend(union bpf_attr, btf_vmlinux_value_type_id);
	bool close_inner_map_fd = false;
	int map_create_attr;
	union bpf_attr attr;

	memset(&attr, 0, attr_size);
	attr.map_type = map_attr->map_type;
	attr.key_size = map_attr->key_size;
	attr.value_size = map_attr->value_size;
	attr.map_flags = map_attr->map_flags;
	memcpy(attr.map_name, map_attr->name,
	       min((unsigned)strlen(map_attr->name), BPF_OBJ_NAME_LEN - 1));
	attr.numa_node = map_attr->numa_node;
	attr.map_ifindex = map_attr->map_ifindex;
	attr.max_entries = map_attr->max_entries;
	switch (attr.map_type) {
	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
	case BPF_MAP_TYPE_CGROUP_ARRAY:
	case BPF_MAP_TYPE_STACK_TRACE:
	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
	case BPF_MAP_TYPE_HASH_OF_MAPS:
	case BPF_MAP_TYPE_DEVMAP:
	case BPF_MAP_TYPE_DEVMAP_HASH:
	case BPF_MAP_TYPE_CPUMAP:
	case BPF_MAP_TYPE_XSKMAP:
	case BPF_MAP_TYPE_SOCKMAP:
	case BPF_MAP_TYPE_SOCKHASH:
	case BPF_MAP_TYPE_QUEUE:
	case BPF_MAP_TYPE_STACK:
	case BPF_MAP_TYPE_RINGBUF:
		/* these map types don't support BTF key/value type info */
		break;
	default:
		attr.btf_key_type_id = map_attr->btf_key_type_id;
		attr.btf_value_type_id = map_attr->btf_value_type_id;
	}

	pr_debug("gen: map_create: %s idx %d type %d value_type_id %d\n",
		 attr.map_name, map_idx, map_attr->map_type, attr.btf_value_type_id);

	map_create_attr = add_data(gen, &attr, attr_size);
	if (attr.btf_value_type_id)
		/* populate union bpf_attr with btf_fd saved in the stack earlier */
		move_stack2blob(gen, attr_field(map_create_attr, btf_fd), 4,
				stack_off(btf_fd));
	switch (attr.map_type) {
	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
	case BPF_MAP_TYPE_HASH_OF_MAPS:
		move_stack2blob(gen, attr_field(map_create_attr, inner_map_fd), 4,
				stack_off(inner_map_fd));
		close_inner_map_fd = true;
		break;
	default:
		break;
	}
	/* conditionally update max_entries */
	if (map_idx >= 0)
		move_ctx2blob(gen, attr_field(map_create_attr, max_entries), 4,
			      sizeof(struct bpf_loader_ctx) +
			      sizeof(struct bpf_map_desc) * map_idx +
			      offsetof(struct bpf_map_desc, max_entries),
			      true /* check that max_entries != 0 */);
	/* emit MAP_CREATE command */
	emit_sys_bpf(gen, BPF_MAP_CREATE, map_create_attr, attr_size);
	debug_ret(gen, "map_create %s idx %d type %d value_size %d value_btf_id %d",
		  attr.map_name, map_idx, map_attr->map_type, attr.value_size,
		  attr.btf_value_type_id);
	emit_check_err(gen);
	/* remember map_fd in the stack, if successful */
	if (map_idx < 0) {
		/* This function is called with map_idx >= 0 for all maps
		 * that the libbpf loading logic tracks.
		 * It's called with -1 to create an inner map.
		 */
		emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7,
				      stack_off(inner_map_fd)));
	} else if (map_idx != gen->nr_maps) {
		gen->error = -EDOM; /* internal bug */
		return;
	} else {
		emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7,
				      stack_off(map_fd[map_idx])));
		gen->nr_maps++;
	}
	if (close_inner_map_fd)
		emit_sys_close_stack(gen, stack_off(inner_map_fd));
}

void bpf_gen__record_attach_target(struct bpf_gen *gen, const char *attach_name,
				   enum bpf_attach_type type)
{
	const char *prefix;
	int kind, ret;

	btf_get_kernel_prefix_kind(type, &prefix, &kind);
	gen->attach_kind = kind;
	ret = snprintf(gen->attach_target, sizeof(gen->attach_target), "%s%s",
		       prefix, attach_name);
	/* snprintf() returns the would-be length, so >= means truncation */
	if (ret >= sizeof(gen->attach_target))
		gen->error = -ENOSPC;
}

static void emit_find_attach_target(struct bpf_gen *gen)
{
	int name, len = strlen(gen->attach_target) + 1;

	pr_debug("gen: find_attach_tgt %s %d\n", gen->attach_target, gen->attach_kind);
	name = add_data(gen, gen->attach_target, len);

	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, name));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
	emit(gen, BPF_MOV64_IMM(BPF_REG_3, gen->attach_kind));
	emit(gen, BPF_MOV64_IMM(BPF_REG_4, 0));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_btf_find_by_name_kind));
	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
	debug_ret(gen, "find_by_name_kind(%s,%d)",
		  gen->attach_target, gen->attach_kind);
	emit_check_err(gen);
	/* if successful, btf_id is in lower 32-bit of R7 and
	 * btf_obj_fd is in upper 32-bit
	 */
}

void bpf_gen__record_extern(struct bpf_gen *gen, const char *name, int kind,
			    int insn_idx)
{
	struct ksym_relo_desc *relo;

	relo = libbpf_reallocarray(gen->relos, gen->relo_cnt + 1, sizeof(*relo));
	if (!relo) {
		gen->error = -ENOMEM;
		return;
	}
	gen->relos = relo;
	relo += gen->relo_cnt;
	relo->name = name;
	relo->kind = kind;
	relo->insn_idx = insn_idx;
	gen->relo_cnt++;
}

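/* Resolve one extern ksym at runtime: call bpf_btf_find_by_name_kind() and
 * patch the resulting btf_id into the imm field of the ld_imm64 insn at
 * insn_idx; for BTF_KIND_VAR the BTF obj FD from the upper 32 bits of the
 * result goes into the imm of the second half of that ld_imm64.
 */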
static void emit_relo(struct bpf_gen *gen, struct ksym_relo_desc *relo, int insns)
{
	int name, insn, len = strlen(relo->name) + 1;

	pr_debug("gen: emit_relo: %s at %d\n", relo->name, relo->insn_idx);
	name = add_data(gen, relo->name, len);

	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, name));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, len));
	emit(gen, BPF_MOV64_IMM(BPF_REG_3, relo->kind));
	emit(gen, BPF_MOV64_IMM(BPF_REG_4, 0));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_btf_find_by_name_kind));
	emit(gen, BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
	debug_ret(gen, "find_by_name_kind(%s,%d)", relo->name, relo->kind);
	emit_check_err(gen);
	/* store btf_id into insn[insn_idx].imm */
	insn = insns + sizeof(struct bpf_insn) * relo->insn_idx +
		offsetof(struct bpf_insn, imm);
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, insn));
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7, 0));
	if (relo->kind == BTF_KIND_VAR) {
		/* store btf_obj_fd into insn[insn_idx + 1].imm */
		emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 32));
		emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7,
				      sizeof(struct bpf_insn)));
	}
}

static void emit_relos(struct bpf_gen *gen, int insns)
{
	int i;

	for (i = 0; i < gen->relo_cnt; i++)
		emit_relo(gen, gen->relos + i, insns);
}

static void cleanup_relos(struct bpf_gen *gen, int insns)
{
	int i, insn;

	for (i = 0; i < gen->relo_cnt; i++) {
		if (gen->relos[i].kind != BTF_KIND_VAR)
			continue;
		/* close fd recorded in insn[insn_idx + 1].imm */
		insn = insns +
			sizeof(struct bpf_insn) * (gen->relos[i].insn_idx + 1) +
			offsetof(struct bpf_insn, imm);
		emit_sys_close_blob(gen, insn);
	}
	if (gen->relo_cnt) {
		free(gen->relos);
		gen->relo_cnt = 0;
		gen->relos = NULL;
	}
}

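/* Record a PROG_LOAD command.  License, insns, func/line info and the attr
 * itself all go into the blob; FDs and the attach btf_id/btf_obj_fd are
 * patched in at runtime, ksym relocations are applied to the insns in the
 * blob, and the resulting prog_fd ends up on the loader's stack.
 */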
void bpf_gen__prog_load(struct bpf_gen *gen,
			struct bpf_prog_load_params *load_attr, int prog_idx)
{
	int attr_size = offsetofend(union bpf_attr, fd_array);
	int prog_load_attr, license, insns, func_info, line_info;
	union bpf_attr attr;

	memset(&attr, 0, attr_size);
	pr_debug("gen: prog_load: type %d insns_cnt %zd\n",
		 load_attr->prog_type, load_attr->insn_cnt);
	/* add license string to blob of bytes */
	license = add_data(gen, load_attr->license, strlen(load_attr->license) + 1);
	/* add insns to blob of bytes */
	insns = add_data(gen, load_attr->insns,
			 load_attr->insn_cnt * sizeof(struct bpf_insn));

	attr.prog_type = load_attr->prog_type;
	attr.expected_attach_type = load_attr->expected_attach_type;
	attr.attach_btf_id = load_attr->attach_btf_id;
	attr.prog_ifindex = load_attr->prog_ifindex;
	attr.kern_version = 0;
	attr.insn_cnt = (__u32)load_attr->insn_cnt;
	attr.prog_flags = load_attr->prog_flags;

	attr.func_info_rec_size = load_attr->func_info_rec_size;
	attr.func_info_cnt = load_attr->func_info_cnt;
	func_info = add_data(gen, load_attr->func_info,
			     attr.func_info_cnt * attr.func_info_rec_size);

	attr.line_info_rec_size = load_attr->line_info_rec_size;
	attr.line_info_cnt = load_attr->line_info_cnt;
	line_info = add_data(gen, load_attr->line_info,
			     attr.line_info_cnt * attr.line_info_rec_size);

	memcpy(attr.prog_name, load_attr->name,
	       min((unsigned)strlen(load_attr->name), BPF_OBJ_NAME_LEN - 1));
	prog_load_attr = add_data(gen, &attr, attr_size);

	/* populate union bpf_attr with a pointer to license */
	emit_rel_store(gen, attr_field(prog_load_attr, license), license);

	/* populate union bpf_attr with a pointer to instructions */
	emit_rel_store(gen, attr_field(prog_load_attr, insns), insns);

	/* populate union bpf_attr with a pointer to func_info */
	emit_rel_store(gen, attr_field(prog_load_attr, func_info), func_info);

	/* populate union bpf_attr with a pointer to line_info */
	emit_rel_store(gen, attr_field(prog_load_attr, line_info), line_info);

	/* populate union bpf_attr fd_array with a pointer to stack where map_fds are saved */
	emit_rel_store_sp(gen, attr_field(prog_load_attr, fd_array),
			  stack_off(map_fd[0]));

	/* populate union bpf_attr with user provided log details */
	move_ctx2blob(gen, attr_field(prog_load_attr, log_level), 4,
		      offsetof(struct bpf_loader_ctx, log_level), false);
	move_ctx2blob(gen, attr_field(prog_load_attr, log_size), 4,
		      offsetof(struct bpf_loader_ctx, log_size), false);
	move_ctx2blob(gen, attr_field(prog_load_attr, log_buf), 8,
		      offsetof(struct bpf_loader_ctx, log_buf), false);
	/* populate union bpf_attr with btf_fd saved in the stack earlier */
	move_stack2blob(gen, attr_field(prog_load_attr, prog_btf_fd), 4,
			stack_off(btf_fd));
	if (gen->attach_kind) {
		emit_find_attach_target(gen);
		/* populate union bpf_attr with btf_id and btf_obj_fd found by helper */
		emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_0, BPF_PSEUDO_MAP_IDX_VALUE,
						 0, 0, 0, prog_load_attr));
		emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7,
				      offsetof(union bpf_attr, attach_btf_id)));
		emit(gen, BPF_ALU64_IMM(BPF_RSH, BPF_REG_7, 32));
		emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_7,
				      offsetof(union bpf_attr, attach_btf_obj_fd)));
	}
	emit_relos(gen, insns);
	/* emit PROG_LOAD command */
	emit_sys_bpf(gen, BPF_PROG_LOAD, prog_load_attr, attr_size);
	debug_ret(gen, "prog_load %s insn_cnt %d", attr.prog_name, attr.insn_cnt);
	/* successful or not, close btf module FDs used in extern ksyms and attach_btf_obj_fd */
	cleanup_relos(gen, insns);
	if (gen->attach_kind)
		emit_sys_close_blob(gen,
				    attr_field(prog_load_attr, attach_btf_obj_fd));
	emit_check_err(gen);
	/* remember prog_fd in the stack, if successful */
	emit(gen, BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_7,
			      stack_off(prog_fd[gen->nr_progs])));
	gen->nr_progs++;
}

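/* Record a MAP_UPDATE_ELEM command that seeds map slot 0 with 'pvalue'.
 * If the skeleton's ctx supplies an initial_value pointer at runtime, the
 * generated code first overwrites the blob copy via bpf_copy_from_user(),
 * so user-space can adjust the initial data before the loader runs.
 */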
void bpf_gen__map_update_elem(struct bpf_gen *gen, int map_idx, void *pvalue,
			      __u32 value_size)
{
	int attr_size = offsetofend(union bpf_attr, flags);
	int map_update_attr, value, key;
	union bpf_attr attr;
	int zero = 0;

	memset(&attr, 0, attr_size);
	pr_debug("gen: map_update_elem: idx %d\n", map_idx);

	value = add_data(gen, pvalue, value_size);
	key = add_data(gen, &zero, sizeof(zero));

	/* if (map_desc[map_idx].initial_value)
	 *    copy_from_user(value, initial_value, value_size);
	 */
	emit(gen, BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_6,
			      sizeof(struct bpf_loader_ctx) +
			      sizeof(struct bpf_map_desc) * map_idx +
			      offsetof(struct bpf_map_desc, initial_value)));
	emit(gen, BPF_JMP_IMM(BPF_JEQ, BPF_REG_3, 0, 4));
	emit2(gen, BPF_LD_IMM64_RAW_FULL(BPF_REG_1, BPF_PSEUDO_MAP_IDX_VALUE,
					 0, 0, 0, value));
	emit(gen, BPF_MOV64_IMM(BPF_REG_2, value_size));
	emit(gen, BPF_EMIT_CALL(BPF_FUNC_copy_from_user));

	map_update_attr = add_data(gen, &attr, attr_size);
	move_stack2blob(gen, attr_field(map_update_attr, map_fd), 4,
			stack_off(map_fd[map_idx]));
	emit_rel_store(gen, attr_field(map_update_attr, key), key);
	emit_rel_store(gen, attr_field(map_update_attr, value), value);
	/* emit MAP_UPDATE_ELEM command */
	emit_sys_bpf(gen, BPF_MAP_UPDATE_ELEM, map_update_attr, attr_size);
	debug_ret(gen, "update_elem idx %d value_size %d", map_idx, value_size);
	emit_check_err(gen);
}

void bpf_gen__map_freeze(struct bpf_gen *gen, int map_idx)
{
	int attr_size = offsetofend(union bpf_attr, map_fd);
	int map_freeze_attr;
	union bpf_attr attr;

	memset(&attr, 0, attr_size);
	pr_debug("gen: map_freeze: idx %d\n", map_idx);
	map_freeze_attr = add_data(gen, &attr, attr_size);
	move_stack2blob(gen, attr_field(map_freeze_attr, map_fd), 4,
			stack_off(map_fd[map_idx]));
	/* emit MAP_FREEZE command */
	emit_sys_bpf(gen, BPF_MAP_FREEZE, map_freeze_attr, attr_size);
	debug_ret(gen, "map_freeze");
	emit_check_err(gen);
}
730