xref: /openbmc/linux/tools/lib/bpf/libbpf.c (revision 0ca650c1)
1 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
2 
3 /*
4  * Common eBPF ELF object loading operations.
5  *
6  * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
7  * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
8  * Copyright (C) 2015 Huawei Inc.
9  * Copyright (C) 2017 Nicira, Inc.
10  */
11 
12 #ifndef _GNU_SOURCE
13 #define _GNU_SOURCE
14 #endif
15 #include <stdlib.h>
16 #include <stdio.h>
17 #include <stdarg.h>
18 #include <libgen.h>
19 #include <inttypes.h>
20 #include <string.h>
21 #include <unistd.h>
22 #include <fcntl.h>
23 #include <errno.h>
24 #include <asm/unistd.h>
25 #include <linux/err.h>
26 #include <linux/kernel.h>
27 #include <linux/bpf.h>
28 #include <linux/btf.h>
29 #include <linux/filter.h>
30 #include <linux/list.h>
31 #include <linux/limits.h>
32 #include <linux/perf_event.h>
33 #include <linux/ring_buffer.h>
34 #include <sys/stat.h>
35 #include <sys/types.h>
36 #include <sys/vfs.h>
37 #include <tools/libc_compat.h>
38 #include <libelf.h>
39 #include <gelf.h>
40 
41 #include "libbpf.h"
42 #include "bpf.h"
43 #include "btf.h"
44 #include "str_error.h"
45 #include "libbpf_util.h"
46 
47 #ifndef EM_BPF
48 #define EM_BPF 247
49 #endif
50 
51 #ifndef BPF_FS_MAGIC
52 #define BPF_FS_MAGIC		0xcafe4a11
53 #endif
54 
55 #define __printf(a, b)	__attribute__((format(printf, a, b)))
56 
57 static int __base_pr(enum libbpf_print_level level, const char *format,
58 		     va_list args)
59 {
60 	if (level == LIBBPF_DEBUG)
61 		return 0;
62 
63 	return vfprintf(stderr, format, args);
64 }
65 
66 static libbpf_print_fn_t __libbpf_pr = __base_pr;
67 
68 void libbpf_set_print(libbpf_print_fn_t fn)
69 {
70 	__libbpf_pr = fn;
71 }
72 
73 __printf(2, 3)
74 void libbpf_print(enum libbpf_print_level level, const char *format, ...)
75 {
76 	va_list args;
77 
78 	if (!__libbpf_pr)
79 		return;
80 
81 	va_start(args, format);
82 	__libbpf_pr(level, format, args);
83 	va_end(args);
84 }
85 
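/*
 * Example usage (illustrative sketch): callers can route libbpf output
 * through their own logger.  __base_pr() above drops LIBBPF_DEBUG
 * messages by default; a custom callback can keep them:
 *
 *	static int my_print(enum libbpf_print_level level,
 *			    const char *format, va_list args)
 *	{
 *		return vfprintf(stderr, format, args);
 *	}
 *
 *	libbpf_set_print(my_print);	// all levels now reach stderr
 *	libbpf_set_print(NULL);		// silence libbpf entirely
 */
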
86 #define STRERR_BUFSIZE  128
87 
88 #define CHECK_ERR(action, err, out) do {	\
89 	err = action;			\
90 	if (err)			\
91 		goto out;		\
92 } while (0)
93 
94 
95 /* Copied from tools/perf/util/util.h */
96 #ifndef zfree
97 # define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
98 #endif
99 
100 #ifndef zclose
101 # define zclose(fd) ({			\
102 	int ___err = 0;			\
103 	if ((fd) >= 0)			\
104 		___err = close((fd));	\
105 	fd = -1;			\
106 	___err; })
107 #endif
108 
109 #ifdef HAVE_LIBELF_MMAP_SUPPORT
110 # define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP
111 #else
112 # define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
113 #endif
114 
115 struct bpf_capabilities {
116 	/* v4.14: kernel support for program & map names. */
117 	__u32 name:1;
118 };
119 
120 /*
121  * bpf_prog would be a better name, but it is already used in
122  * linux/filter.h.
123  */
124 struct bpf_program {
125 	/* Index in elf obj file, for relocation use. */
126 	int idx;
127 	char *name;
128 	int prog_ifindex;
129 	char *section_name;
130 	/* section_name with / replaced by _; makes recursive pinning
131 	 * in bpf_object__pin_programs easier
132 	 */
133 	char *pin_name;
134 	struct bpf_insn *insns;
135 	size_t insns_cnt, main_prog_cnt;
136 	enum bpf_prog_type type;
137 
138 	struct reloc_desc {
139 		enum {
140 			RELO_LD64,
141 			RELO_CALL,
142 		} type;
143 		int insn_idx;
144 		union {
145 			int map_idx;
146 			int text_off;
147 		};
148 	} *reloc_desc;
149 	int nr_reloc;
150 
151 	struct {
152 		int nr;
153 		int *fds;
154 	} instances;
155 	bpf_program_prep_t preprocessor;
156 
157 	struct bpf_object *obj;
158 	void *priv;
159 	bpf_program_clear_priv_t clear_priv;
160 
161 	enum bpf_attach_type expected_attach_type;
162 	int btf_fd;
163 	void *func_info;
164 	__u32 func_info_rec_size;
165 	__u32 func_info_cnt;
166 
167 	struct bpf_capabilities *caps;
168 
169 	void *line_info;
170 	__u32 line_info_rec_size;
171 	__u32 line_info_cnt;
172 };
173 
174 struct bpf_map {
175 	int fd;
176 	char *name;
177 	size_t offset;
178 	int map_ifindex;
179 	int inner_map_fd;
180 	struct bpf_map_def def;
181 	__u32 btf_key_type_id;
182 	__u32 btf_value_type_id;
183 	void *priv;
184 	bpf_map_clear_priv_t clear_priv;
185 };
186 
187 static LIST_HEAD(bpf_objects_list);
188 
189 struct bpf_object {
190 	char license[64];
191 	__u32 kern_version;
192 
193 	struct bpf_program *programs;
194 	size_t nr_programs;
195 	struct bpf_map *maps;
196 	size_t nr_maps;
197 
198 	bool loaded;
199 	bool has_pseudo_calls;
200 
201 	/*
202 	 * Information used while processing the ELF file. Only valid
203 	 * while fd is valid.
204 	 */
205 	struct {
206 		int fd;
207 		void *obj_buf;
208 		size_t obj_buf_sz;
209 		Elf *elf;
210 		GElf_Ehdr ehdr;
211 		Elf_Data *symbols;
212 		size_t strtabidx;
213 		struct {
214 			GElf_Shdr shdr;
215 			Elf_Data *data;
216 		} *reloc;
217 		int nr_reloc;
218 		int maps_shndx;
219 		int text_shndx;
220 	} efile;
221 	/*
222 	 * All loaded bpf_object structures are linked in a list, which
223 	 * is hidden from the caller. bpf_object__<func> handlers deal
224 	 * with all objects.
225 	 */
226 	struct list_head list;
227 
228 	struct btf *btf;
229 	struct btf_ext *btf_ext;
230 
231 	void *priv;
232 	bpf_object_clear_priv_t clear_priv;
233 
234 	struct bpf_capabilities caps;
235 
236 	char path[];
237 };
238 #define obj_elf_valid(o)	((o)->efile.elf)
239 
240 void bpf_program__unload(struct bpf_program *prog)
241 {
242 	int i;
243 
244 	if (!prog)
245 		return;
246 
247 	/*
248 	 * If the object is opened but the program was never loaded,
249 	 * it is possible that prog->instances.nr == -1.
250 	 */
251 	if (prog->instances.nr > 0) {
252 		for (i = 0; i < prog->instances.nr; i++)
253 			zclose(prog->instances.fds[i]);
254 	} else if (prog->instances.nr != -1) {
255 		pr_warning("Internal error: instances.nr is %d\n",
256 			   prog->instances.nr);
257 	}
258 
259 	prog->instances.nr = -1;
260 	zfree(&prog->instances.fds);
261 
262 	zclose(prog->btf_fd);
263 	zfree(&prog->func_info);
264 	zfree(&prog->line_info);
265 }
266 
267 static void bpf_program__exit(struct bpf_program *prog)
268 {
269 	if (!prog)
270 		return;
271 
272 	if (prog->clear_priv)
273 		prog->clear_priv(prog, prog->priv);
274 
275 	prog->priv = NULL;
276 	prog->clear_priv = NULL;
277 
278 	bpf_program__unload(prog);
279 	zfree(&prog->name);
280 	zfree(&prog->section_name);
281 	zfree(&prog->pin_name);
282 	zfree(&prog->insns);
283 	zfree(&prog->reloc_desc);
284 
285 	prog->nr_reloc = 0;
286 	prog->insns_cnt = 0;
287 	prog->idx = -1;
288 }
289 
290 static char *__bpf_program__pin_name(struct bpf_program *prog)
291 {
292 	char *name, *p;
293 
294 	name = p = strdup(prog->section_name);
295 	while ((p = strchr(p, '/')))
296 		*p = '_';
297 
298 	return name;
299 }
300 
301 static int
302 bpf_program__init(void *data, size_t size, char *section_name, int idx,
303 		  struct bpf_program *prog)
304 {
305 	if (size < sizeof(struct bpf_insn)) {
306 		pr_warning("corrupted section '%s'\n", section_name);
307 		return -EINVAL;
308 	}
309 
310 	memset(prog, 0, sizeof(*prog));
311 
312 	prog->section_name = strdup(section_name);
313 	if (!prog->section_name) {
314 		pr_warning("failed to alloc name for prog under section(%d) %s\n",
315 			   idx, section_name);
316 		goto errout;
317 	}
318 
319 	prog->pin_name = __bpf_program__pin_name(prog);
320 	if (!prog->pin_name) {
321 		pr_warning("failed to alloc pin name for prog under section(%d) %s\n",
322 			   idx, section_name);
323 		goto errout;
324 	}
325 
326 	prog->insns = malloc(size);
327 	if (!prog->insns) {
328 		pr_warning("failed to alloc insns for prog under section %s\n",
329 			   section_name);
330 		goto errout;
331 	}
332 	prog->insns_cnt = size / sizeof(struct bpf_insn);
333 	memcpy(prog->insns, data,
334 	       prog->insns_cnt * sizeof(struct bpf_insn));
335 	prog->idx = idx;
336 	prog->instances.fds = NULL;
337 	prog->instances.nr = -1;
338 	prog->type = BPF_PROG_TYPE_UNSPEC;
339 	prog->btf_fd = -1;
340 
341 	return 0;
342 errout:
343 	bpf_program__exit(prog);
344 	return -ENOMEM;
345 }
346 
347 static int
348 bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
349 			char *section_name, int idx)
350 {
351 	struct bpf_program prog, *progs;
352 	int nr_progs, err;
353 
354 	err = bpf_program__init(data, size, section_name, idx, &prog);
355 	if (err)
356 		return err;
357 
358 	prog.caps = &obj->caps;
359 	progs = obj->programs;
360 	nr_progs = obj->nr_programs;
361 
362 	progs = reallocarray(progs, nr_progs + 1, sizeof(progs[0]));
363 	if (!progs) {
364 		/*
365 		 * In this case the original obj->programs
366 		 * is still valid, so no special treatment is needed in
367 		 * bpf_object__close().
368 		 */
369 		pr_warning("failed to alloc a new program under section '%s'\n",
370 			   section_name);
371 		bpf_program__exit(&prog);
372 		return -ENOMEM;
373 	}
374 
375 	pr_debug("found program %s\n", prog.section_name);
376 	obj->programs = progs;
377 	obj->nr_programs = nr_progs + 1;
378 	prog.obj = obj;
379 	progs[nr_progs] = prog;
380 	return 0;
381 }
382 
383 static int
384 bpf_object__init_prog_names(struct bpf_object *obj)
385 {
386 	Elf_Data *symbols = obj->efile.symbols;
387 	struct bpf_program *prog;
388 	size_t pi, si;
389 
390 	for (pi = 0; pi < obj->nr_programs; pi++) {
391 		const char *name = NULL;
392 
393 		prog = &obj->programs[pi];
394 
395 		for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name;
396 		     si++) {
397 			GElf_Sym sym;
398 
399 			if (!gelf_getsym(symbols, si, &sym))
400 				continue;
401 			if (sym.st_shndx != prog->idx)
402 				continue;
403 			if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL)
404 				continue;
405 
406 			name = elf_strptr(obj->efile.elf,
407 					  obj->efile.strtabidx,
408 					  sym.st_name);
409 			if (!name) {
410 				pr_warning("failed to get sym name string for prog %s\n",
411 					   prog->section_name);
412 				return -LIBBPF_ERRNO__LIBELF;
413 			}
414 		}
415 
416 		if (!name && prog->idx == obj->efile.text_shndx)
417 			name = ".text";
418 
419 		if (!name) {
420 			pr_warning("failed to find sym for prog %s\n",
421 				   prog->section_name);
422 			return -EINVAL;
423 		}
424 
425 		prog->name = strdup(name);
426 		if (!prog->name) {
427 			pr_warning("failed to allocate memory for prog sym %s\n",
428 				   name);
429 			return -ENOMEM;
430 		}
431 	}
432 
433 	return 0;
434 }
435 
436 static struct bpf_object *bpf_object__new(const char *path,
437 					  void *obj_buf,
438 					  size_t obj_buf_sz)
439 {
440 	struct bpf_object *obj;
441 
442 	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
443 	if (!obj) {
444 		pr_warning("alloc memory failed for %s\n", path);
445 		return ERR_PTR(-ENOMEM);
446 	}
447 
448 	strcpy(obj->path, path);
449 	obj->efile.fd = -1;
450 
451 	/*
452 	 * The caller of this function should also call
453 	 * bpf_object__elf_finish() after data collection to return
454 	 * obj_buf to the user. Otherwise we would have to duplicate the
455 	 * buffer to avoid the user freeing it before ELF processing finishes.
456 	 */
457 	obj->efile.obj_buf = obj_buf;
458 	obj->efile.obj_buf_sz = obj_buf_sz;
459 	obj->efile.maps_shndx = -1;
460 
461 	obj->loaded = false;
462 
463 	INIT_LIST_HEAD(&obj->list);
464 	list_add(&obj->list, &bpf_objects_list);
465 	return obj;
466 }
467 
468 static void bpf_object__elf_finish(struct bpf_object *obj)
469 {
470 	if (!obj_elf_valid(obj))
471 		return;
472 
473 	if (obj->efile.elf) {
474 		elf_end(obj->efile.elf);
475 		obj->efile.elf = NULL;
476 	}
477 	obj->efile.symbols = NULL;
478 
479 	zfree(&obj->efile.reloc);
480 	obj->efile.nr_reloc = 0;
481 	zclose(obj->efile.fd);
482 	obj->efile.obj_buf = NULL;
483 	obj->efile.obj_buf_sz = 0;
484 }
485 
486 static int bpf_object__elf_init(struct bpf_object *obj)
487 {
488 	int err = 0;
489 	GElf_Ehdr *ep;
490 
491 	if (obj_elf_valid(obj)) {
492 		pr_warning("elf init: internal error\n");
493 		return -LIBBPF_ERRNO__LIBELF;
494 	}
495 
496 	if (obj->efile.obj_buf_sz > 0) {
497 		/*
498 		 * obj_buf should have been validated by
499 		 * bpf_object__open_buffer().
500 		 */
501 		obj->efile.elf = elf_memory(obj->efile.obj_buf,
502 					    obj->efile.obj_buf_sz);
503 	} else {
504 		obj->efile.fd = open(obj->path, O_RDONLY);
505 		if (obj->efile.fd < 0) {
506 			char errmsg[STRERR_BUFSIZE];
507 			char *cp = libbpf_strerror_r(errno, errmsg,
508 						     sizeof(errmsg));
509 
510 			pr_warning("failed to open %s: %s\n", obj->path, cp);
511 			return -errno;
512 		}
513 
514 		obj->efile.elf = elf_begin(obj->efile.fd,
515 				LIBBPF_ELF_C_READ_MMAP,
516 				NULL);
517 	}
518 
519 	if (!obj->efile.elf) {
520 		pr_warning("failed to open %s as ELF file\n",
521 				obj->path);
522 		err = -LIBBPF_ERRNO__LIBELF;
523 		goto errout;
524 	}
525 
526 	if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
527 		pr_warning("failed to get EHDR from %s\n",
528 				obj->path);
529 		err = -LIBBPF_ERRNO__FORMAT;
530 		goto errout;
531 	}
532 	ep = &obj->efile.ehdr;
533 
534 	/* Old LLVM versions set e_machine to EM_NONE */
535 	if ((ep->e_type != ET_REL) || (ep->e_machine && (ep->e_machine != EM_BPF))) {
536 		pr_warning("%s is not an eBPF object file\n",
537 			obj->path);
538 		err = -LIBBPF_ERRNO__FORMAT;
539 		goto errout;
540 	}
541 
542 	return 0;
543 errout:
544 	bpf_object__elf_finish(obj);
545 	return err;
546 }
547 
548 static int
549 bpf_object__check_endianness(struct bpf_object *obj)
550 {
551 	static unsigned int const endian = 1;
552 
553 	switch (obj->efile.ehdr.e_ident[EI_DATA]) {
554 	case ELFDATA2LSB:
555 		/* We are big endian, BPF obj is little endian. */
556 		if (*(unsigned char const *)&endian != 1)
557 			goto mismatch;
558 		break;
559 
560 	case ELFDATA2MSB:
561 		/* We are little endian, BPF obj is big endian. */
562 		if (*(unsigned char const *)&endian != 0)
563 			goto mismatch;
564 		break;
565 	default:
566 		return -LIBBPF_ERRNO__ENDIAN;
567 	}
568 
569 	return 0;
570 
571 mismatch:
572 	pr_warning("Error: endianness mismatch.\n");
573 	return -LIBBPF_ERRNO__ENDIAN;
574 }
575 
576 static int
577 bpf_object__init_license(struct bpf_object *obj,
578 			 void *data, size_t size)
579 {
580 	memcpy(obj->license, data,
581 	       min(size, sizeof(obj->license) - 1));
582 	pr_debug("license of %s is %s\n", obj->path, obj->license);
583 	return 0;
584 }
585 
586 static int
587 bpf_object__init_kversion(struct bpf_object *obj,
588 			  void *data, size_t size)
589 {
590 	__u32 kver;
591 
592 	if (size != sizeof(kver)) {
593 		pr_warning("invalid kver section in %s\n", obj->path);
594 		return -LIBBPF_ERRNO__FORMAT;
595 	}
596 	memcpy(&kver, data, sizeof(kver));
597 	obj->kern_version = kver;
598 	pr_debug("kernel version of %s is %x\n", obj->path,
599 		 obj->kern_version);
600 	return 0;
601 }
602 
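/*
 * For reference, the "license" and "version" sections handled above are
 * typically produced by BPF C code like this sketch (SEC() as provided
 * by the usual BPF helper headers; the symbol names are illustrative):
 *
 *	char _license[] SEC("license") = "GPL";
 *	__u32 _version SEC("version") = LINUX_VERSION_CODE;
 */
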
603 static int compare_bpf_map(const void *_a, const void *_b)
604 {
605 	const struct bpf_map *a = _a;
606 	const struct bpf_map *b = _b;
607 
608 	return a->offset - b->offset;
609 }
610 
611 static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
612 {
613 	if (type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
614 	    type == BPF_MAP_TYPE_HASH_OF_MAPS)
615 		return true;
616 	return false;
617 }
618 
619 static int
620 bpf_object__init_maps(struct bpf_object *obj, int flags)
621 {
622 	bool strict = !(flags & MAPS_RELAX_COMPAT);
623 	int i, map_idx, map_def_sz, nr_maps = 0;
624 	Elf_Scn *scn;
625 	Elf_Data *data;
626 	Elf_Data *symbols = obj->efile.symbols;
627 
628 	if (obj->efile.maps_shndx < 0)
629 		return -EINVAL;
630 	if (!symbols)
631 		return -EINVAL;
632 
633 	scn = elf_getscn(obj->efile.elf, obj->efile.maps_shndx);
634 	if (scn)
635 		data = elf_getdata(scn, NULL);
636 	if (!scn || !data) {
637 		pr_warning("failed to get Elf_Data from map section %d\n",
638 			   obj->efile.maps_shndx);
639 		return -EINVAL;
640 	}
641 
642 	/*
643 	 * Count the number of maps. Each map has a name.
644 	 * Arrays of maps are not supported: only the first element is
645 	 * considered.
646 	 *
647 	 * TODO: Detect arrays of maps and report an error.
648 	 */
649 	for (i = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
650 		GElf_Sym sym;
651 
652 		if (!gelf_getsym(symbols, i, &sym))
653 			continue;
654 		if (sym.st_shndx != obj->efile.maps_shndx)
655 			continue;
656 		nr_maps++;
657 	}
658 
659 	/* Alloc obj->maps and fill nr_maps. */
660 	pr_debug("maps in %s: %d maps in %zd bytes\n", obj->path,
661 		 nr_maps, data->d_size);
662 
663 	if (!nr_maps)
664 		return 0;
665 
666 	/* Assume equally sized map definitions */
667 	map_def_sz = data->d_size / nr_maps;
668 	if (!data->d_size || (data->d_size % nr_maps) != 0) {
669 		pr_warning("unable to determine map definition size "
670 			   "section %s, %d maps in %zd bytes\n",
671 			   obj->path, nr_maps, data->d_size);
672 		return -EINVAL;
673 	}
674 
675 	obj->maps = calloc(nr_maps, sizeof(obj->maps[0]));
676 	if (!obj->maps) {
677 		pr_warning("alloc maps for object failed\n");
678 		return -ENOMEM;
679 	}
680 	obj->nr_maps = nr_maps;
681 
682 	for (i = 0; i < nr_maps; i++) {
683 		/*
684 		 * Fill all fds with -1 so we won't accidentally close an
685 		 * incorrect fd (fd == 0 is stdin) on failure (zclose won't
686 		 * close a negative fd).
687 		 */
688 		obj->maps[i].fd = -1;
689 		obj->maps[i].inner_map_fd = -1;
690 	}
691 
692 	/*
693 	 * Fill obj->maps using data in "maps" section.
694 	 */
695 	for (i = 0, map_idx = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
696 		GElf_Sym sym;
697 		const char *map_name;
698 		struct bpf_map_def *def;
699 
700 		if (!gelf_getsym(symbols, i, &sym))
701 			continue;
702 		if (sym.st_shndx != obj->efile.maps_shndx)
703 			continue;
704 
705 		map_name = elf_strptr(obj->efile.elf,
706 				      obj->efile.strtabidx,
707 				      sym.st_name);
708 		obj->maps[map_idx].offset = sym.st_value;
709 		if (sym.st_value + map_def_sz > data->d_size) {
710 			pr_warning("corrupted maps section in %s: last map \"%s\" too small\n",
711 				   obj->path, map_name);
712 			return -EINVAL;
713 		}
714 
715 		obj->maps[map_idx].name = strdup(map_name);
716 		if (!obj->maps[map_idx].name) {
717 			pr_warning("failed to alloc map name\n");
718 			return -ENOMEM;
719 		}
720 		pr_debug("map %d is \"%s\"\n", map_idx,
721 			 obj->maps[map_idx].name);
722 		def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
723 		/*
724 		 * If the definition of the map in the object file fits in
725 		 * bpf_map_def, copy it.  Any extra fields in our version
726 		 * of bpf_map_def will default to zero as a result of the
727 		 * calloc above.
728 		 */
729 		if (map_def_sz <= sizeof(struct bpf_map_def)) {
730 			memcpy(&obj->maps[map_idx].def, def, map_def_sz);
731 		} else {
732 			/*
733 			 * Here the map structure being read is bigger than what
734 			 * we expect; truncate if the excess bytes are all zero.
735 			 * If they are not zero, reject this map as
736 			 * incompatible.
737 			 */
738 			char *b;
739 			for (b = ((char *)def) + sizeof(struct bpf_map_def);
740 			     b < ((char *)def) + map_def_sz; b++) {
741 				if (*b != 0) {
742 					pr_warning("maps section in %s: \"%s\" "
743 						   "has unrecognized, non-zero "
744 						   "options\n",
745 						   obj->path, map_name);
746 					if (strict)
747 						return -EINVAL;
748 				}
749 			}
750 			memcpy(&obj->maps[map_idx].def, def,
751 			       sizeof(struct bpf_map_def));
752 		}
753 		map_idx++;
754 	}
755 
756 	qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]), compare_bpf_map);
757 	return 0;
758 }
759 
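/*
 * For reference, the "maps" section walked above normally comes from
 * definitions like this sketch in BPF C code (SEC() from the usual BPF
 * helper headers; the map name and sizes are illustrative):
 *
 *	struct bpf_map_def SEC("maps") my_map = {
 *		.type        = BPF_MAP_TYPE_HASH,
 *		.key_size    = sizeof(__u32),
 *		.value_size  = sizeof(__u64),
 *		.max_entries = 1024,
 *	};
 *
 * Each such symbol contributes one definition; definitions larger than
 * struct bpf_map_def are accepted only if the extra bytes are zero (or
 * MAPS_RELAX_COMPAT is passed).
 */
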
760 static bool section_have_execinstr(struct bpf_object *obj, int idx)
761 {
762 	Elf_Scn *scn;
763 	GElf_Shdr sh;
764 
765 	scn = elf_getscn(obj->efile.elf, idx);
766 	if (!scn)
767 		return false;
768 
769 	if (gelf_getshdr(scn, &sh) != &sh)
770 		return false;
771 
772 	if (sh.sh_flags & SHF_EXECINSTR)
773 		return true;
774 
775 	return false;
776 }
777 
778 static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
779 {
780 	Elf *elf = obj->efile.elf;
781 	GElf_Ehdr *ep = &obj->efile.ehdr;
782 	Elf_Data *btf_ext_data = NULL;
783 	Elf_Scn *scn = NULL;
784 	int idx = 0, err = 0;
785 
786 	/* ELF is corrupted/truncated; avoid calling elf_strptr. */
787 	if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
788 		pr_warning("failed to get e_shstrndx from %s\n",
789 			   obj->path);
790 		return -LIBBPF_ERRNO__FORMAT;
791 	}
792 
793 	while ((scn = elf_nextscn(elf, scn)) != NULL) {
794 		char *name;
795 		GElf_Shdr sh;
796 		Elf_Data *data;
797 
798 		idx++;
799 		if (gelf_getshdr(scn, &sh) != &sh) {
800 			pr_warning("failed to get section(%d) header from %s\n",
801 				   idx, obj->path);
802 			err = -LIBBPF_ERRNO__FORMAT;
803 			goto out;
804 		}
805 
806 		name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
807 		if (!name) {
808 			pr_warning("failed to get section(%d) name from %s\n",
809 				   idx, obj->path);
810 			err = -LIBBPF_ERRNO__FORMAT;
811 			goto out;
812 		}
813 
814 		data = elf_getdata(scn, 0);
815 		if (!data) {
816 			pr_warning("failed to get section(%d) data from %s(%s)\n",
817 				   idx, name, obj->path);
818 			err = -LIBBPF_ERRNO__FORMAT;
819 			goto out;
820 		}
821 		pr_debug("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
822 			 idx, name, (unsigned long)data->d_size,
823 			 (int)sh.sh_link, (unsigned long)sh.sh_flags,
824 			 (int)sh.sh_type);
825 
826 		if (strcmp(name, "license") == 0)
827 			err = bpf_object__init_license(obj,
828 						       data->d_buf,
829 						       data->d_size);
830 		else if (strcmp(name, "version") == 0)
831 			err = bpf_object__init_kversion(obj,
832 							data->d_buf,
833 							data->d_size);
834 		else if (strcmp(name, "maps") == 0)
835 			obj->efile.maps_shndx = idx;
836 		else if (strcmp(name, BTF_ELF_SEC) == 0) {
837 			obj->btf = btf__new(data->d_buf, data->d_size);
838 			if (IS_ERR(obj->btf) || btf__load(obj->btf)) {
839 				pr_warning("Error loading ELF section %s: %ld. Ignoring it and continuing.\n",
840 					   BTF_ELF_SEC, PTR_ERR(obj->btf));
841 				if (!IS_ERR(obj->btf))
842 					btf__free(obj->btf);
843 				obj->btf = NULL;
844 			}
845 		} else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
846 			btf_ext_data = data;
847 		} else if (sh.sh_type == SHT_SYMTAB) {
848 			if (obj->efile.symbols) {
849 				pr_warning("bpf: multiple SYMTAB in %s\n",
850 					   obj->path);
851 				err = -LIBBPF_ERRNO__FORMAT;
852 			} else {
853 				obj->efile.symbols = data;
854 				obj->efile.strtabidx = sh.sh_link;
855 			}
856 		} else if ((sh.sh_type == SHT_PROGBITS) &&
857 			   (sh.sh_flags & SHF_EXECINSTR) &&
858 			   (data->d_size > 0)) {
859 			if (strcmp(name, ".text") == 0)
860 				obj->efile.text_shndx = idx;
861 			err = bpf_object__add_program(obj, data->d_buf,
862 						      data->d_size, name, idx);
863 			if (err) {
864 				char errmsg[STRERR_BUFSIZE];
865 				char *cp = libbpf_strerror_r(-err, errmsg,
866 							     sizeof(errmsg));
867 
868 				pr_warning("failed to alloc program %s (%s): %s\n",
869 					   name, obj->path, cp);
870 			}
871 		} else if (sh.sh_type == SHT_REL) {
872 			void *reloc = obj->efile.reloc;
873 			int nr_reloc = obj->efile.nr_reloc + 1;
874 			int sec = sh.sh_info; /* points to other section */
875 
876 			/* Only do relo for section with exec instructions */
877 			if (!section_have_execinstr(obj, sec)) {
878 				pr_debug("skip relo %s(%d) for section(%d)\n",
879 					 name, idx, sec);
880 				continue;
881 			}
882 
883 			reloc = reallocarray(reloc, nr_reloc,
884 					     sizeof(*obj->efile.reloc));
885 			if (!reloc) {
886 				pr_warning("realloc failed\n");
887 				err = -ENOMEM;
888 			} else {
889 				int n = nr_reloc - 1;
890 
891 				obj->efile.reloc = reloc;
892 				obj->efile.nr_reloc = nr_reloc;
893 
894 				obj->efile.reloc[n].shdr = sh;
895 				obj->efile.reloc[n].data = data;
896 			}
897 		} else {
898 			pr_debug("skip section(%d) %s\n", idx, name);
899 		}
900 		if (err)
901 			goto out;
902 	}
903 
904 	if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) {
905 		pr_warning("Corrupted ELF file: index of strtab invalid\n");
906 		return -LIBBPF_ERRNO__FORMAT;
907 	}
908 	if (btf_ext_data) {
909 		if (!obj->btf) {
910 			pr_debug("Ignoring ELF section %s because the ELF section it depends on, %s, is not found.\n",
911 				 BTF_EXT_ELF_SEC, BTF_ELF_SEC);
912 		} else {
913 			obj->btf_ext = btf_ext__new(btf_ext_data->d_buf,
914 						    btf_ext_data->d_size);
915 			if (IS_ERR(obj->btf_ext)) {
916 				pr_warning("Error loading ELF section %s: %ld. Ignoring it and continuing.\n",
917 					   BTF_EXT_ELF_SEC,
918 					   PTR_ERR(obj->btf_ext));
919 				obj->btf_ext = NULL;
920 			}
921 		}
922 	}
923 	if (obj->efile.maps_shndx >= 0) {
924 		err = bpf_object__init_maps(obj, flags);
925 		if (err)
926 			goto out;
927 	}
928 	err = bpf_object__init_prog_names(obj);
929 out:
930 	return err;
931 }
932 
933 static struct bpf_program *
934 bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
935 {
936 	struct bpf_program *prog;
937 	size_t i;
938 
939 	for (i = 0; i < obj->nr_programs; i++) {
940 		prog = &obj->programs[i];
941 		if (prog->idx == idx)
942 			return prog;
943 	}
944 	return NULL;
945 }
946 
947 struct bpf_program *
948 bpf_object__find_program_by_title(struct bpf_object *obj, const char *title)
949 {
950 	struct bpf_program *pos;
951 
952 	bpf_object__for_each_program(pos, obj) {
953 		if (pos->section_name && !strcmp(pos->section_name, title))
954 			return pos;
955 	}
956 	return NULL;
957 }
958 
959 static int
960 bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
961 			   Elf_Data *data, struct bpf_object *obj)
962 {
963 	Elf_Data *symbols = obj->efile.symbols;
964 	int text_shndx = obj->efile.text_shndx;
965 	int maps_shndx = obj->efile.maps_shndx;
966 	struct bpf_map *maps = obj->maps;
967 	size_t nr_maps = obj->nr_maps;
968 	int i, nrels;
969 
970 	pr_debug("collecting relocating info for: '%s'\n",
971 		 prog->section_name);
972 	nrels = shdr->sh_size / shdr->sh_entsize;
973 
974 	prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
975 	if (!prog->reloc_desc) {
976 		pr_warning("failed to alloc memory in relocation\n");
977 		return -ENOMEM;
978 	}
979 	prog->nr_reloc = nrels;
980 
981 	for (i = 0; i < nrels; i++) {
982 		GElf_Sym sym;
983 		GElf_Rel rel;
984 		unsigned int insn_idx;
985 		struct bpf_insn *insns = prog->insns;
986 		size_t map_idx;
987 
988 		if (!gelf_getrel(data, i, &rel)) {
989 			pr_warning("relocation: failed to get %d reloc\n", i);
990 			return -LIBBPF_ERRNO__FORMAT;
991 		}
992 
993 		if (!gelf_getsym(symbols,
994 				 GELF_R_SYM(rel.r_info),
995 				 &sym)) {
996 			pr_warning("relocation: symbol %"PRIx64" not found\n",
997 				   GELF_R_SYM(rel.r_info));
998 			return -LIBBPF_ERRNO__FORMAT;
999 		}
1000 		pr_debug("relo for %lld value %lld name %d\n",
1001 			 (long long) (rel.r_info >> 32),
1002 			 (long long) sym.st_value, sym.st_name);
1003 
1004 		if (sym.st_shndx != maps_shndx && sym.st_shndx != text_shndx) {
1005 			pr_warning("Program '%s' contains non-map related relo data pointing to section %u\n",
1006 				   prog->section_name, sym.st_shndx);
1007 			return -LIBBPF_ERRNO__RELOC;
1008 		}
1009 
1010 		insn_idx = rel.r_offset / sizeof(struct bpf_insn);
1011 		pr_debug("relocation: insn_idx=%u\n", insn_idx);
1012 
1013 		if (insns[insn_idx].code == (BPF_JMP | BPF_CALL)) {
1014 			if (insns[insn_idx].src_reg != BPF_PSEUDO_CALL) {
1015 				pr_warning("incorrect bpf_call opcode\n");
1016 				return -LIBBPF_ERRNO__RELOC;
1017 			}
1018 			prog->reloc_desc[i].type = RELO_CALL;
1019 			prog->reloc_desc[i].insn_idx = insn_idx;
1020 			prog->reloc_desc[i].text_off = sym.st_value;
1021 			obj->has_pseudo_calls = true;
1022 			continue;
1023 		}
1024 
1025 		if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
1026 			pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
1027 				   insn_idx, insns[insn_idx].code);
1028 			return -LIBBPF_ERRNO__RELOC;
1029 		}
1030 
1031 		/* TODO: 'maps' is sorted. We can use bsearch to make it faster. */
1032 		for (map_idx = 0; map_idx < nr_maps; map_idx++) {
1033 			if (maps[map_idx].offset == sym.st_value) {
1034 				pr_debug("relocation: find map %zd (%s) for insn %u\n",
1035 					 map_idx, maps[map_idx].name, insn_idx);
1036 				break;
1037 			}
1038 		}
1039 
1040 		if (map_idx >= nr_maps) {
1041 			pr_warning("bpf relocation: map_idx %d larger than %d\n",
1042 				   (int)map_idx, (int)nr_maps - 1);
1043 			return -LIBBPF_ERRNO__RELOC;
1044 		}
1045 
1046 		prog->reloc_desc[i].type = RELO_LD64;
1047 		prog->reloc_desc[i].insn_idx = insn_idx;
1048 		prog->reloc_desc[i].map_idx = map_idx;
1049 	}
1050 	return 0;
1051 }
1052 
1053 static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf)
1054 {
1055 	struct bpf_map_def *def = &map->def;
1056 	__u32 key_type_id, value_type_id;
1057 	int ret;
1058 
1059 	ret = btf__get_map_kv_tids(btf, map->name, def->key_size,
1060 				   def->value_size, &key_type_id,
1061 				   &value_type_id);
1062 	if (ret)
1063 		return ret;
1064 
1065 	map->btf_key_type_id = key_type_id;
1066 	map->btf_value_type_id = value_type_id;
1067 
1068 	return 0;
1069 }
1070 
1071 int bpf_map__reuse_fd(struct bpf_map *map, int fd)
1072 {
1073 	struct bpf_map_info info = {};
1074 	__u32 len = sizeof(info);
1075 	int new_fd, err;
1076 	char *new_name;
1077 
1078 	err = bpf_obj_get_info_by_fd(fd, &info, &len);
1079 	if (err)
1080 		return err;
1081 
1082 	new_name = strdup(info.name);
1083 	if (!new_name)
1084 		return -errno;
1085 
1086 	new_fd = open("/", O_RDONLY | O_CLOEXEC);
1087 	if (new_fd < 0)
1088 		goto err_free_new_name;
1089 
1090 	new_fd = dup3(fd, new_fd, O_CLOEXEC);
1091 	if (new_fd < 0)
1092 		goto err_close_new_fd;
1093 
1094 	err = zclose(map->fd);
1095 	if (err)
1096 		goto err_close_new_fd;
1097 	free(map->name);
1098 
1099 	map->fd = new_fd;
1100 	map->name = new_name;
1101 	map->def.type = info.type;
1102 	map->def.key_size = info.key_size;
1103 	map->def.value_size = info.value_size;
1104 	map->def.max_entries = info.max_entries;
1105 	map->def.map_flags = info.map_flags;
1106 	map->btf_key_type_id = info.btf_key_type_id;
1107 	map->btf_value_type_id = info.btf_value_type_id;
1108 
1109 	return 0;
1110 
1111 err_close_new_fd:
1112 	close(new_fd);
1113 err_free_new_name:
1114 	free(new_name);
1115 	return -errno;
1116 }
1117 
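/*
 * Example usage (illustrative sketch): share a pinned map between
 * objects by reusing its fd before bpf_object__load().  The pin path
 * and map name are placeholders:
 *
 *	int fd = bpf_obj_get("/sys/fs/bpf/my_map");
 *	struct bpf_map *map = bpf_object__find_map_by_name(obj, "my_map");
 *
 *	if (fd >= 0 && map)
 *		err = bpf_map__reuse_fd(map, fd);
 */
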
1118 int bpf_map__resize(struct bpf_map *map, __u32 max_entries)
1119 {
1120 	if (!map || !max_entries)
1121 		return -EINVAL;
1122 
1123 	/* If map already created, its attributes can't be changed. */
1124 	if (map->fd >= 0)
1125 		return -EBUSY;
1126 
1127 	map->def.max_entries = max_entries;
1128 
1129 	return 0;
1130 }
1131 
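/*
 * Example usage (illustrative sketch): map attributes can only be tuned
 * after bpf_object__open() and before bpf_object__load(); afterwards
 * this fails with -EBUSY:
 *
 *	err = bpf_map__resize(map, 4096);
 */
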
1132 static int
1133 bpf_object__probe_name(struct bpf_object *obj)
1134 {
1135 	struct bpf_load_program_attr attr;
1136 	char *cp, errmsg[STRERR_BUFSIZE];
1137 	struct bpf_insn insns[] = {
1138 		BPF_MOV64_IMM(BPF_REG_0, 0),
1139 		BPF_EXIT_INSN(),
1140 	};
1141 	int ret;
1142 
1143 	/* make sure basic loading works */
1144 
1145 	memset(&attr, 0, sizeof(attr));
1146 	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
1147 	attr.insns = insns;
1148 	attr.insns_cnt = ARRAY_SIZE(insns);
1149 	attr.license = "GPL";
1150 
1151 	ret = bpf_load_program_xattr(&attr, NULL, 0);
1152 	if (ret < 0) {
1153 		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1154 		pr_warning("Error in %s():%s(%d). Couldn't load basic 'r0 = 0' BPF program.\n",
1155 			   __func__, cp, errno);
1156 		return -errno;
1157 	}
1158 	close(ret);
1159 
1160 	/* now try the same program, but with the name */
1161 
1162 	attr.name = "test";
1163 	ret = bpf_load_program_xattr(&attr, NULL, 0);
1164 	if (ret >= 0) {
1165 		obj->caps.name = 1;
1166 		close(ret);
1167 	}
1168 
1169 	return 0;
1170 }
1171 
1172 static int
1173 bpf_object__probe_caps(struct bpf_object *obj)
1174 {
1175 	return bpf_object__probe_name(obj);
1176 }
1177 
1178 static int
1179 bpf_object__create_maps(struct bpf_object *obj)
1180 {
1181 	struct bpf_create_map_attr create_attr = {};
1182 	unsigned int i;
1183 	int err;
1184 
1185 	for (i = 0; i < obj->nr_maps; i++) {
1186 		struct bpf_map *map = &obj->maps[i];
1187 		struct bpf_map_def *def = &map->def;
1188 		char *cp, errmsg[STRERR_BUFSIZE];
1189 		int *pfd = &map->fd;
1190 
1191 		if (map->fd >= 0) {
1192 			pr_debug("skip map create (preset) %s: fd=%d\n",
1193 				 map->name, map->fd);
1194 			continue;
1195 		}
1196 
1197 		if (obj->caps.name)
1198 			create_attr.name = map->name;
1199 		create_attr.map_ifindex = map->map_ifindex;
1200 		create_attr.map_type = def->type;
1201 		create_attr.map_flags = def->map_flags;
1202 		create_attr.key_size = def->key_size;
1203 		create_attr.value_size = def->value_size;
1204 		create_attr.max_entries = def->max_entries;
1205 		create_attr.btf_fd = 0;
1206 		create_attr.btf_key_type_id = 0;
1207 		create_attr.btf_value_type_id = 0;
1208 		if (bpf_map_type__is_map_in_map(def->type) &&
1209 		    map->inner_map_fd >= 0)
1210 			create_attr.inner_map_fd = map->inner_map_fd;
1211 
1212 		if (obj->btf && !bpf_map_find_btf_info(map, obj->btf)) {
1213 			create_attr.btf_fd = btf__fd(obj->btf);
1214 			create_attr.btf_key_type_id = map->btf_key_type_id;
1215 			create_attr.btf_value_type_id = map->btf_value_type_id;
1216 		}
1217 
1218 		*pfd = bpf_create_map_xattr(&create_attr);
1219 		if (*pfd < 0 && create_attr.btf_key_type_id) {
1220 			cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1221 			pr_warning("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
1222 				   map->name, cp, errno);
1223 			create_attr.btf_fd = 0;
1224 			create_attr.btf_key_type_id = 0;
1225 			create_attr.btf_value_type_id = 0;
1226 			map->btf_key_type_id = 0;
1227 			map->btf_value_type_id = 0;
1228 			*pfd = bpf_create_map_xattr(&create_attr);
1229 		}
1230 
1231 		if (*pfd < 0) {
1232 			size_t j;
1233 
1234 			err = *pfd;
1235 			cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1236 			pr_warning("failed to create map (name: '%s'): %s\n",
1237 				   map->name, cp);
1238 			for (j = 0; j < i; j++)
1239 				zclose(obj->maps[j].fd);
1240 			return err;
1241 		}
1242 		pr_debug("create map %s: fd=%d\n", map->name, *pfd);
1243 	}
1244 
1245 	return 0;
1246 }
1247 
1248 static int
1249 check_btf_ext_reloc_err(struct bpf_program *prog, int err,
1250 			void *btf_prog_info, const char *info_name)
1251 {
1252 	if (err != -ENOENT) {
1253 		pr_warning("Error in loading %s for sec %s.\n",
1254 			   info_name, prog->section_name);
1255 		return err;
1256 	}
1257 
1258 	/* err == -ENOENT (i.e. prog->section_name not found in btf_ext) */
1259 
1260 	if (btf_prog_info) {
1261 		/*
1262 		 * Some info has already been found, but there was a
1263 		 * problem with the last btf_ext reloc. We must error
1264 		 * out.
1265 		 */
1266 		pr_warning("Error in relocating %s for sec %s.\n",
1267 			   info_name, prog->section_name);
1268 		return err;
1269 	}
1270 
1271 	/*
1272 	 * There was a problem loading the very first info. Ignore
1273 	 * the rest.
1274 	 */
1275 	pr_warning("Cannot find %s for main program sec %s. Ignore all %s.\n",
1276 		   info_name, prog->section_name, info_name);
1277 	return 0;
1278 }
1279 
1280 static int
1281 bpf_program_reloc_btf_ext(struct bpf_program *prog, struct bpf_object *obj,
1282 			  const char *section_name,  __u32 insn_offset)
1283 {
1284 	int err;
1285 
1286 	if (!insn_offset || prog->func_info) {
1287 		/*
1288 		 * !insn_offset => main program
1289 		 *
1290 		 * For sub prog, the main program's func_info has to
1291 		 * be loaded first (i.e. prog->func_info != NULL)
1292 		 */
1293 		err = btf_ext__reloc_func_info(obj->btf, obj->btf_ext,
1294 					       section_name, insn_offset,
1295 					       &prog->func_info,
1296 					       &prog->func_info_cnt);
1297 		if (err)
1298 			return check_btf_ext_reloc_err(prog, err,
1299 						       prog->func_info,
1300 						       "bpf_func_info");
1301 
1302 		prog->func_info_rec_size = btf_ext__func_info_rec_size(obj->btf_ext);
1303 	}
1304 
1305 	if (!insn_offset || prog->line_info) {
1306 		err = btf_ext__reloc_line_info(obj->btf, obj->btf_ext,
1307 					       section_name, insn_offset,
1308 					       &prog->line_info,
1309 					       &prog->line_info_cnt);
1310 		if (err)
1311 			return check_btf_ext_reloc_err(prog, err,
1312 						       prog->line_info,
1313 						       "bpf_line_info");
1314 
1315 		prog->line_info_rec_size = btf_ext__line_info_rec_size(obj->btf_ext);
1316 	}
1317 
1318 	if (!insn_offset)
1319 		prog->btf_fd = btf__fd(obj->btf);
1320 
1321 	return 0;
1322 }
1323 
1324 static int
1325 bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
1326 			struct reloc_desc *relo)
1327 {
1328 	struct bpf_insn *insn, *new_insn;
1329 	struct bpf_program *text;
1330 	size_t new_cnt;
1331 	int err;
1332 
1333 	if (relo->type != RELO_CALL)
1334 		return -LIBBPF_ERRNO__RELOC;
1335 
1336 	if (prog->idx == obj->efile.text_shndx) {
1337 		pr_warning("relo in .text insn %d into off %d\n",
1338 			   relo->insn_idx, relo->text_off);
1339 		return -LIBBPF_ERRNO__RELOC;
1340 	}
1341 
1342 	if (prog->main_prog_cnt == 0) {
1343 		text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx);
1344 		if (!text) {
1345 			pr_warning("no .text section found, yet relo into text exists\n");
1346 			return -LIBBPF_ERRNO__RELOC;
1347 		}
1348 		new_cnt = prog->insns_cnt + text->insns_cnt;
1349 		new_insn = reallocarray(prog->insns, new_cnt, sizeof(*insn));
1350 		if (!new_insn) {
1351 			pr_warning("oom in prog realloc\n");
1352 			return -ENOMEM;
1353 		}
1354 
1355 		if (obj->btf_ext) {
1356 			err = bpf_program_reloc_btf_ext(prog, obj,
1357 							text->section_name,
1358 							prog->insns_cnt);
1359 			if (err)
1360 				return err;
1361 		}
1362 
1363 		memcpy(new_insn + prog->insns_cnt, text->insns,
1364 		       text->insns_cnt * sizeof(*insn));
1365 		prog->insns = new_insn;
1366 		prog->main_prog_cnt = prog->insns_cnt;
1367 		prog->insns_cnt = new_cnt;
1368 		pr_debug("added %zd insn from %s to prog %s\n",
1369 			 text->insns_cnt, text->section_name,
1370 			 prog->section_name);
1371 	}
1372 	insn = &prog->insns[relo->insn_idx];
1373 	insn->imm += prog->main_prog_cnt - relo->insn_idx;
1374 	return 0;
1375 }
1376 
1377 static int
1378 bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
1379 {
1380 	int i, err;
1381 
1382 	if (!prog)
1383 		return 0;
1384 
1385 	if (obj->btf_ext) {
1386 		err = bpf_program_reloc_btf_ext(prog, obj,
1387 						prog->section_name, 0);
1388 		if (err)
1389 			return err;
1390 	}
1391 
1392 	if (!prog->reloc_desc)
1393 		return 0;
1394 
1395 	for (i = 0; i < prog->nr_reloc; i++) {
1396 		if (prog->reloc_desc[i].type == RELO_LD64) {
1397 			struct bpf_insn *insns = prog->insns;
1398 			int insn_idx, map_idx;
1399 
1400 			insn_idx = prog->reloc_desc[i].insn_idx;
1401 			map_idx = prog->reloc_desc[i].map_idx;
1402 
1403 			if (insn_idx >= (int)prog->insns_cnt) {
1404 				pr_warning("relocation out of range: '%s'\n",
1405 					   prog->section_name);
1406 				return -LIBBPF_ERRNO__RELOC;
1407 			}
1408 			insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
1409 			insns[insn_idx].imm = obj->maps[map_idx].fd;
1410 		} else {
1411 			err = bpf_program__reloc_text(prog, obj,
1412 						      &prog->reloc_desc[i]);
1413 			if (err)
1414 				return err;
1415 		}
1416 	}
1417 
1418 	zfree(&prog->reloc_desc);
1419 	prog->nr_reloc = 0;
1420 	return 0;
1421 }
1422 
1423 
1424 static int
1425 bpf_object__relocate(struct bpf_object *obj)
1426 {
1427 	struct bpf_program *prog;
1428 	size_t i;
1429 	int err;
1430 
1431 	for (i = 0; i < obj->nr_programs; i++) {
1432 		prog = &obj->programs[i];
1433 
1434 		err = bpf_program__relocate(prog, obj);
1435 		if (err) {
1436 			pr_warning("failed to relocate '%s'\n",
1437 				   prog->section_name);
1438 			return err;
1439 		}
1440 	}
1441 	return 0;
1442 }
1443 
1444 static int bpf_object__collect_reloc(struct bpf_object *obj)
1445 {
1446 	int i, err;
1447 
1448 	if (!obj_elf_valid(obj)) {
1449 		pr_warning("Internal error: elf object is closed\n");
1450 		return -LIBBPF_ERRNO__INTERNAL;
1451 	}
1452 
1453 	for (i = 0; i < obj->efile.nr_reloc; i++) {
1454 		GElf_Shdr *shdr = &obj->efile.reloc[i].shdr;
1455 		Elf_Data *data = obj->efile.reloc[i].data;
1456 		int idx = shdr->sh_info;
1457 		struct bpf_program *prog;
1458 
1459 		if (shdr->sh_type != SHT_REL) {
1460 			pr_warning("internal error at %d\n", __LINE__);
1461 			return -LIBBPF_ERRNO__INTERNAL;
1462 		}
1463 
1464 		prog = bpf_object__find_prog_by_idx(obj, idx);
1465 		if (!prog) {
1466 			pr_warning("relocation failed: no section(%d)\n", idx);
1467 			return -LIBBPF_ERRNO__RELOC;
1468 		}
1469 
1470 		err = bpf_program__collect_reloc(prog,
1471 						 shdr, data,
1472 						 obj);
1473 		if (err)
1474 			return err;
1475 	}
1476 	return 0;
1477 }
1478 
1479 static int
1480 load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
1481 	     char *license, __u32 kern_version, int *pfd)
1482 {
1483 	struct bpf_load_program_attr load_attr;
1484 	char *cp, errmsg[STRERR_BUFSIZE];
1485 	char *log_buf;
1486 	int ret;
1487 
1488 	memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
1489 	load_attr.prog_type = prog->type;
1490 	load_attr.expected_attach_type = prog->expected_attach_type;
1491 	if (prog->caps->name)
1492 		load_attr.name = prog->name;
1493 	load_attr.insns = insns;
1494 	load_attr.insns_cnt = insns_cnt;
1495 	load_attr.license = license;
1496 	load_attr.kern_version = kern_version;
1497 	load_attr.prog_ifindex = prog->prog_ifindex;
1498 	load_attr.prog_btf_fd = prog->btf_fd >= 0 ? prog->btf_fd : 0;
1499 	load_attr.func_info = prog->func_info;
1500 	load_attr.func_info_rec_size = prog->func_info_rec_size;
1501 	load_attr.func_info_cnt = prog->func_info_cnt;
1502 	load_attr.line_info = prog->line_info;
1503 	load_attr.line_info_rec_size = prog->line_info_rec_size;
1504 	load_attr.line_info_cnt = prog->line_info_cnt;
1505 	if (!load_attr.insns || !load_attr.insns_cnt)
1506 		return -EINVAL;
1507 
1508 	log_buf = malloc(BPF_LOG_BUF_SIZE);
1509 	if (!log_buf)
1510 		pr_warning("Failed to alloc log buffer for bpf loader, continuing without log\n");
1511 
1512 	ret = bpf_load_program_xattr(&load_attr, log_buf, BPF_LOG_BUF_SIZE);
1513 
1514 	if (ret >= 0) {
1515 		*pfd = ret;
1516 		ret = 0;
1517 		goto out;
1518 	}
1519 
1520 	ret = -LIBBPF_ERRNO__LOAD;
1521 	cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1522 	pr_warning("failed to load bpf program: %s\n", cp);
1523 
1524 	if (log_buf && log_buf[0] != '\0') {
1525 		ret = -LIBBPF_ERRNO__VERIFY;
1526 		pr_warning("-- BEGIN DUMP LOG ---\n");
1527 		pr_warning("\n%s\n", log_buf);
1528 		pr_warning("-- END LOG --\n");
1529 	} else if (load_attr.insns_cnt >= BPF_MAXINSNS) {
1530 		pr_warning("Program too large (%zu insns), at most %d insns\n",
1531 			   load_attr.insns_cnt, BPF_MAXINSNS);
1532 		ret = -LIBBPF_ERRNO__PROG2BIG;
1533 	} else {
1534 		/* Wrong program type? */
1535 		if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
1536 			int fd;
1537 
1538 			load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
1539 			load_attr.expected_attach_type = 0;
1540 			fd = bpf_load_program_xattr(&load_attr, NULL, 0);
1541 			if (fd >= 0) {
1542 				close(fd);
1543 				ret = -LIBBPF_ERRNO__PROGTYPE;
1544 				goto out;
1545 			}
1546 		}
1547 
1548 		if (log_buf)
1549 			ret = -LIBBPF_ERRNO__KVER;
1550 	}
1551 
1552 out:
1553 	free(log_buf);
1554 	return ret;
1555 }
1556 
1557 int
1558 bpf_program__load(struct bpf_program *prog,
1559 		  char *license, __u32 kern_version)
1560 {
1561 	int err = 0, fd, i;
1562 
1563 	if (prog->instances.nr < 0 || !prog->instances.fds) {
1564 		if (prog->preprocessor) {
1565 			pr_warning("Internal error: can't load program '%s'\n",
1566 				   prog->section_name);
1567 			return -LIBBPF_ERRNO__INTERNAL;
1568 		}
1569 
1570 		prog->instances.fds = malloc(sizeof(int));
1571 		if (!prog->instances.fds) {
1572 			pr_warning("Not enough memory for BPF fds\n");
1573 			return -ENOMEM;
1574 		}
1575 		prog->instances.nr = 1;
1576 		prog->instances.fds[0] = -1;
1577 	}
1578 
1579 	if (!prog->preprocessor) {
1580 		if (prog->instances.nr != 1) {
1581 			pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n",
1582 				   prog->section_name, prog->instances.nr);
1583 		}
1584 		err = load_program(prog, prog->insns, prog->insns_cnt,
1585 				   license, kern_version, &fd);
1586 		if (!err)
1587 			prog->instances.fds[0] = fd;
1588 		goto out;
1589 	}
1590 
1591 	for (i = 0; i < prog->instances.nr; i++) {
1592 		struct bpf_prog_prep_result result;
1593 		bpf_program_prep_t preprocessor = prog->preprocessor;
1594 
1595 		memset(&result, 0, sizeof(result));
1596 		err = preprocessor(prog, i, prog->insns,
1597 				   prog->insns_cnt, &result);
1598 		if (err) {
1599 			pr_warning("Preprocessing the %dth instance of program '%s' failed\n",
1600 				   i, prog->section_name);
1601 			goto out;
1602 		}
1603 
1604 		if (!result.new_insn_ptr || !result.new_insn_cnt) {
1605 			pr_debug("Skip loading the %dth instance of program '%s'\n",
1606 				 i, prog->section_name);
1607 			prog->instances.fds[i] = -1;
1608 			if (result.pfd)
1609 				*result.pfd = -1;
1610 			continue;
1611 		}
1612 
1613 		err = load_program(prog, result.new_insn_ptr,
1614 				   result.new_insn_cnt,
1615 				   license, kern_version, &fd);
1616 
1617 		if (err) {
1618 			pr_warning("Loading the %dth instance of program '%s' failed\n",
1619 					i, prog->section_name);
1620 			goto out;
1621 		}
1622 
1623 		if (result.pfd)
1624 			*result.pfd = fd;
1625 		prog->instances.fds[i] = fd;
1626 	}
1627 out:
1628 	if (err)
1629 		pr_warning("failed to load program '%s'\n",
1630 			   prog->section_name);
1631 	zfree(&prog->insns);
1632 	prog->insns_cnt = 0;
1633 	return err;
1634 }
1635 
1636 static bool bpf_program__is_function_storage(struct bpf_program *prog,
1637 					     struct bpf_object *obj)
1638 {
1639 	return prog->idx == obj->efile.text_shndx && obj->has_pseudo_calls;
1640 }
1641 
1642 static int
1643 bpf_object__load_progs(struct bpf_object *obj)
1644 {
1645 	size_t i;
1646 	int err;
1647 
1648 	for (i = 0; i < obj->nr_programs; i++) {
1649 		if (bpf_program__is_function_storage(&obj->programs[i], obj))
1650 			continue;
1651 		err = bpf_program__load(&obj->programs[i],
1652 					obj->license,
1653 					obj->kern_version);
1654 		if (err)
1655 			return err;
1656 	}
1657 	return 0;
1658 }
1659 
1660 static bool bpf_prog_type__needs_kver(enum bpf_prog_type type)
1661 {
1662 	switch (type) {
1663 	case BPF_PROG_TYPE_SOCKET_FILTER:
1664 	case BPF_PROG_TYPE_SCHED_CLS:
1665 	case BPF_PROG_TYPE_SCHED_ACT:
1666 	case BPF_PROG_TYPE_XDP:
1667 	case BPF_PROG_TYPE_CGROUP_SKB:
1668 	case BPF_PROG_TYPE_CGROUP_SOCK:
1669 	case BPF_PROG_TYPE_LWT_IN:
1670 	case BPF_PROG_TYPE_LWT_OUT:
1671 	case BPF_PROG_TYPE_LWT_XMIT:
1672 	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
1673 	case BPF_PROG_TYPE_SOCK_OPS:
1674 	case BPF_PROG_TYPE_SK_SKB:
1675 	case BPF_PROG_TYPE_CGROUP_DEVICE:
1676 	case BPF_PROG_TYPE_SK_MSG:
1677 	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
1678 	case BPF_PROG_TYPE_LIRC_MODE2:
1679 	case BPF_PROG_TYPE_SK_REUSEPORT:
1680 	case BPF_PROG_TYPE_FLOW_DISSECTOR:
1681 	case BPF_PROG_TYPE_UNSPEC:
1682 	case BPF_PROG_TYPE_TRACEPOINT:
1683 	case BPF_PROG_TYPE_RAW_TRACEPOINT:
1684 	case BPF_PROG_TYPE_PERF_EVENT:
1685 		return false;
1686 	case BPF_PROG_TYPE_KPROBE:
1687 	default:
1688 		return true;
1689 	}
1690 }
1691 
1692 static int bpf_object__validate(struct bpf_object *obj, bool needs_kver)
1693 {
1694 	if (needs_kver && obj->kern_version == 0) {
1695 		pr_warning("%s doesn't provide kernel version\n",
1696 			   obj->path);
1697 		return -LIBBPF_ERRNO__KVERSION;
1698 	}
1699 	return 0;
1700 }
1701 
1702 static struct bpf_object *
1703 __bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz,
1704 		   bool needs_kver, int flags)
1705 {
1706 	struct bpf_object *obj;
1707 	int err;
1708 
1709 	if (elf_version(EV_CURRENT) == EV_NONE) {
1710 		pr_warning("failed to init libelf for %s\n", path);
1711 		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
1712 	}
1713 
1714 	obj = bpf_object__new(path, obj_buf, obj_buf_sz);
1715 	if (IS_ERR(obj))
1716 		return obj;
1717 
1718 	CHECK_ERR(bpf_object__elf_init(obj), err, out);
1719 	CHECK_ERR(bpf_object__check_endianness(obj), err, out);
1720 	CHECK_ERR(bpf_object__elf_collect(obj, flags), err, out);
1721 	CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
1722 	CHECK_ERR(bpf_object__validate(obj, needs_kver), err, out);
1723 
1724 	bpf_object__elf_finish(obj);
1725 	return obj;
1726 out:
1727 	bpf_object__close(obj);
1728 	return ERR_PTR(err);
1729 }
1730 
1731 struct bpf_object *__bpf_object__open_xattr(struct bpf_object_open_attr *attr,
1732 					    int flags)
1733 {
1734 	/* param validation */
1735 	if (!attr->file)
1736 		return NULL;
1737 
1738 	pr_debug("loading %s\n", attr->file);
1739 
1740 	return __bpf_object__open(attr->file, NULL, 0,
1741 				  bpf_prog_type__needs_kver(attr->prog_type),
1742 				  flags);
1743 }
1744 
1745 struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
1746 {
1747 	return __bpf_object__open_xattr(attr, 0);
1748 }
1749 
1750 struct bpf_object *bpf_object__open(const char *path)
1751 {
1752 	struct bpf_object_open_attr attr = {
1753 		.file		= path,
1754 		.prog_type	= BPF_PROG_TYPE_UNSPEC,
1755 	};
1756 
1757 	return bpf_object__open_xattr(&attr);
1758 }
1759 
1760 struct bpf_object *bpf_object__open_buffer(void *obj_buf,
1761 					   size_t obj_buf_sz,
1762 					   const char *name)
1763 {
1764 	char tmp_name[64];
1765 
1766 	/* param validation */
1767 	if (!obj_buf || obj_buf_sz <= 0)
1768 		return NULL;
1769 
1770 	if (!name) {
1771 		snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
1772 			 (unsigned long)obj_buf,
1773 			 (unsigned long)obj_buf_sz);
1774 		tmp_name[sizeof(tmp_name) - 1] = '\0';
1775 		name = tmp_name;
1776 	}
1777 	pr_debug("loading object '%s' from buffer\n",
1778 		 name);
1779 
1780 	return __bpf_object__open(name, obj_buf, obj_buf_sz, true, true);
1781 }
1782 
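/*
 * Example usage (illustrative sketch): open an object embedded in the
 * running binary instead of reading it from disk.  The linker-generated
 * symbols below are placeholders:
 *
 *	extern char _binary_prog_o_start[], _binary_prog_o_end[];
 *	struct bpf_object *obj;
 *
 *	obj = bpf_object__open_buffer(_binary_prog_o_start,
 *				      _binary_prog_o_end - _binary_prog_o_start,
 *				      "embedded_prog");
 *	if (!obj || libbpf_get_error(obj))
 *		return -EINVAL;
 */
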
1783 int bpf_object__unload(struct bpf_object *obj)
1784 {
1785 	size_t i;
1786 
1787 	if (!obj)
1788 		return -EINVAL;
1789 
1790 	for (i = 0; i < obj->nr_maps; i++)
1791 		zclose(obj->maps[i].fd);
1792 
1793 	for (i = 0; i < obj->nr_programs; i++)
1794 		bpf_program__unload(&obj->programs[i]);
1795 
1796 	return 0;
1797 }
1798 
1799 int bpf_object__load(struct bpf_object *obj)
1800 {
1801 	int err;
1802 
1803 	if (!obj)
1804 		return -EINVAL;
1805 
1806 	if (obj->loaded) {
1807 		pr_warning("object should not be loaded twice\n");
1808 		return -EINVAL;
1809 	}
1810 
1811 	obj->loaded = true;
1812 
1813 	CHECK_ERR(bpf_object__probe_caps(obj), err, out);
1814 	CHECK_ERR(bpf_object__create_maps(obj), err, out);
1815 	CHECK_ERR(bpf_object__relocate(obj), err, out);
1816 	CHECK_ERR(bpf_object__load_progs(obj), err, out);
1817 
1818 	return 0;
1819 out:
1820 	bpf_object__unload(obj);
1821 	pr_warning("failed to load object '%s'\n", obj->path);
1822 	return err;
1823 }
1824 
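/*
 * Example of the overall open/load flow (illustrative sketch; the object
 * path and section title are placeholders):
 *
 *	struct bpf_object *obj = bpf_object__open("prog.o");
 *	struct bpf_program *prog;
 *	int prog_fd = -1;
 *
 *	if (!obj || libbpf_get_error(obj))
 *		return -EINVAL;
 *	if (bpf_object__load(obj))
 *		goto err;
 *	prog = bpf_object__find_program_by_title(obj, "xdp");
 *	if (prog)
 *		prog_fd = bpf_program__fd(prog);
 *	// attach prog_fd, do work, then tear down
 * err:
 *	bpf_object__close(obj);
 */
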
1825 static int check_path(const char *path)
1826 {
1827 	char *cp, errmsg[STRERR_BUFSIZE];
1828 	struct statfs st_fs;
1829 	char *dname, *dir;
1830 	int err = 0;
1831 
1832 	if (path == NULL)
1833 		return -EINVAL;
1834 
1835 	dname = strdup(path);
1836 	if (dname == NULL)
1837 		return -ENOMEM;
1838 
1839 	dir = dirname(dname);
1840 	if (statfs(dir, &st_fs)) {
1841 		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1842 		pr_warning("failed to statfs %s: %s\n", dir, cp);
1843 		err = -errno;
1844 	}
1845 	free(dname);
1846 
1847 	if (!err && st_fs.f_type != BPF_FS_MAGIC) {
1848 		pr_warning("specified path %s is not on BPF FS\n", path);
1849 		err = -EINVAL;
1850 	}
1851 
1852 	return err;
1853 }
1854 
1855 int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
1856 			      int instance)
1857 {
1858 	char *cp, errmsg[STRERR_BUFSIZE];
1859 	int err;
1860 
1861 	err = check_path(path);
1862 	if (err)
1863 		return err;
1864 
1865 	if (prog == NULL) {
1866 		pr_warning("invalid program pointer\n");
1867 		return -EINVAL;
1868 	}
1869 
1870 	if (instance < 0 || instance >= prog->instances.nr) {
1871 		pr_warning("invalid prog instance %d of prog %s (max %d)\n",
1872 			   instance, prog->section_name, prog->instances.nr);
1873 		return -EINVAL;
1874 	}
1875 
1876 	if (bpf_obj_pin(prog->instances.fds[instance], path)) {
1877 		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1878 		pr_warning("failed to pin program: %s\n", cp);
1879 		return -errno;
1880 	}
1881 	pr_debug("pinned program '%s'\n", path);
1882 
1883 	return 0;
1884 }
1885 
1886 int bpf_program__unpin_instance(struct bpf_program *prog, const char *path,
1887 				int instance)
1888 {
1889 	int err;
1890 
1891 	err = check_path(path);
1892 	if (err)
1893 		return err;
1894 
1895 	if (prog == NULL) {
1896 		pr_warning("invalid program pointer\n");
1897 		return -EINVAL;
1898 	}
1899 
1900 	if (instance < 0 || instance >= prog->instances.nr) {
1901 		pr_warning("invalid prog instance %d of prog %s (max %d)\n",
1902 			   instance, prog->section_name, prog->instances.nr);
1903 		return -EINVAL;
1904 	}
1905 
1906 	err = unlink(path);
1907 	if (err != 0)
1908 		return -errno;
1909 	pr_debug("unpinned program '%s'\n", path);
1910 
1911 	return 0;
1912 }
1913 
1914 static int make_dir(const char *path)
1915 {
1916 	char *cp, errmsg[STRERR_BUFSIZE];
1917 	int err = 0;
1918 
1919 	if (mkdir(path, 0700) && errno != EEXIST)
1920 		err = -errno;
1921 
1922 	if (err) {
1923 		cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
1924 		pr_warning("failed to mkdir %s: %s\n", path, cp);
1925 	}
1926 	return err;
1927 }
1928 
1929 int bpf_program__pin(struct bpf_program *prog, const char *path)
1930 {
1931 	int i, err;
1932 
1933 	err = check_path(path);
1934 	if (err)
1935 		return err;
1936 
1937 	if (prog == NULL) {
1938 		pr_warning("invalid program pointer\n");
1939 		return -EINVAL;
1940 	}
1941 
1942 	if (prog->instances.nr <= 0) {
1943 		pr_warning("no instances of prog %s to pin\n",
1944 			   prog->section_name);
1945 		return -EINVAL;
1946 	}
1947 
1948 	if (prog->instances.nr == 1) {
1949 		/* don't create subdirs when pinning single instance */
1950 		return bpf_program__pin_instance(prog, path, 0);
1951 	}
1952 
1953 	err = make_dir(path);
1954 	if (err)
1955 		return err;
1956 
1957 	for (i = 0; i < prog->instances.nr; i++) {
1958 		char buf[PATH_MAX];
1959 		int len;
1960 
1961 		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
1962 		if (len < 0) {
1963 			err = -EINVAL;
1964 			goto err_unpin;
1965 		} else if (len >= PATH_MAX) {
1966 			err = -ENAMETOOLONG;
1967 			goto err_unpin;
1968 		}
1969 
1970 		err = bpf_program__pin_instance(prog, buf, i);
1971 		if (err)
1972 			goto err_unpin;
1973 	}
1974 
1975 	return 0;
1976 
1977 err_unpin:
1978 	for (i = i - 1; i >= 0; i--) {
1979 		char buf[PATH_MAX];
1980 		int len;
1981 
1982 		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
1983 		if (len < 0)
1984 			continue;
1985 		else if (len >= PATH_MAX)
1986 			continue;
1987 
1988 		bpf_program__unpin_instance(prog, buf, i);
1989 	}
1990 
1991 	rmdir(path);
1992 
1993 	return err;
1994 }
1995 
1996 int bpf_program__unpin(struct bpf_program *prog, const char *path)
1997 {
1998 	int i, err;
1999 
2000 	err = check_path(path);
2001 	if (err)
2002 		return err;
2003 
2004 	if (prog == NULL) {
2005 		pr_warning("invalid program pointer\n");
2006 		return -EINVAL;
2007 	}
2008 
2009 	if (prog->instances.nr <= 0) {
2010 		pr_warning("no instances of prog %s to unpin\n",
2011 			   prog->section_name);
2012 		return -EINVAL;
2013 	}
2014 
2015 	if (prog->instances.nr == 1) {
2016 		/* a single instance is pinned without subdirs, so unpin directly */
2017 		return bpf_program__unpin_instance(prog, path, 0);
2018 	}
2019 
2020 	for (i = 0; i < prog->instances.nr; i++) {
2021 		char buf[PATH_MAX];
2022 		int len;
2023 
2024 		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
2025 		if (len < 0)
2026 			return -EINVAL;
2027 		else if (len >= PATH_MAX)
2028 			return -ENAMETOOLONG;
2029 
2030 		err = bpf_program__unpin_instance(prog, buf, i);
2031 		if (err)
2032 			return err;
2033 	}
2034 
2035 	err = rmdir(path);
2036 	if (err)
2037 		return -errno;
2038 
2039 	return 0;
2040 }
2041 
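/*
 * Example usage (illustrative sketch): pin a loaded program under bpffs
 * and remove it later.  The path is a placeholder and must live on a
 * mounted BPF filesystem:
 *
 *	err = bpf_program__pin(prog, "/sys/fs/bpf/my_prog");
 *	// ... use the pinned program ...
 *	err = bpf_program__unpin(prog, "/sys/fs/bpf/my_prog");
 *
 * A single instance is pinned at the path itself; multiple instances
 * turn the path into a directory with one pin per instance
 * ("/sys/fs/bpf/my_prog/0", "/sys/fs/bpf/my_prog/1", and so on).
 */
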
2042 int bpf_map__pin(struct bpf_map *map, const char *path)
2043 {
2044 	char *cp, errmsg[STRERR_BUFSIZE];
2045 	int err;
2046 
2047 	err = check_path(path);
2048 	if (err)
2049 		return err;
2050 
2051 	if (map == NULL) {
2052 		pr_warning("invalid map pointer\n");
2053 		return -EINVAL;
2054 	}
2055 
2056 	if (bpf_obj_pin(map->fd, path)) {
2057 		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
2058 		pr_warning("failed to pin map: %s\n", cp);
2059 		return -errno;
2060 	}
2061 
2062 	pr_debug("pinned map '%s'\n", path);
2063 
2064 	return 0;
2065 }
2066 
2067 int bpf_map__unpin(struct bpf_map *map, const char *path)
2068 {
2069 	int err;
2070 
2071 	err = check_path(path);
2072 	if (err)
2073 		return err;
2074 
2075 	if (map == NULL) {
2076 		pr_warning("invalid map pointer\n");
2077 		return -EINVAL;
2078 	}
2079 
2080 	err = unlink(path);
2081 	if (err != 0)
2082 		return -errno;
2083 	pr_debug("unpinned map '%s'\n", path);
2084 
2085 	return 0;
2086 }
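/*
 * A minimal sketch of pinning and unpinning one map (assuming the object
 * is loaded so map->fd is valid, and that /sys/fs/bpf is a bpffs mount;
 * the pin name is hypothetical):
 *
 *	err = bpf_map__pin(map, "/sys/fs/bpf/my_map");
 *	if (err)
 *		return err;
 *	...
 *	err = bpf_map__unpin(map, "/sys/fs/bpf/my_map");
 */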
2087 
2088 int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
2089 {
2090 	struct bpf_map *map;
2091 	int err;
2092 
2093 	if (!obj)
2094 		return -ENOENT;
2095 
2096 	if (!obj->loaded) {
2097 		pr_warning("object not yet loaded; load it first\n");
2098 		return -ENOENT;
2099 	}
2100 
2101 	err = make_dir(path);
2102 	if (err)
2103 		return err;
2104 
2105 	bpf_object__for_each_map(map, obj) {
2106 		char buf[PATH_MAX];
2107 		int len;
2108 
2109 		len = snprintf(buf, PATH_MAX, "%s/%s", path,
2110 			       bpf_map__name(map));
2111 		if (len < 0) {
2112 			err = -EINVAL;
2113 			goto err_unpin_maps;
2114 		} else if (len >= PATH_MAX) {
2115 			err = -ENAMETOOLONG;
2116 			goto err_unpin_maps;
2117 		}
2118 
2119 		err = bpf_map__pin(map, buf);
2120 		if (err)
2121 			goto err_unpin_maps;
2122 	}
2123 
2124 	return 0;
2125 
2126 err_unpin_maps:
2127 	while ((map = bpf_map__prev(map, obj))) {
2128 		char buf[PATH_MAX];
2129 		int len;
2130 
2131 		len = snprintf(buf, PATH_MAX, "%s/%s", path,
2132 			       bpf_map__name(map));
2133 		if (len < 0)
2134 			continue;
2135 		else if (len >= PATH_MAX)
2136 			continue;
2137 
2138 		bpf_map__unpin(map, buf);
2139 	}
2140 
2141 	return err;
2142 }
2143 
2144 int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
2145 {
2146 	struct bpf_map *map;
2147 	int err;
2148 
2149 	if (!obj)
2150 		return -ENOENT;
2151 
2152 	bpf_object__for_each_map(map, obj) {
2153 		char buf[PATH_MAX];
2154 		int len;
2155 
2156 		len = snprintf(buf, PATH_MAX, "%s/%s", path,
2157 			       bpf_map__name(map));
2158 		if (len < 0)
2159 			return -EINVAL;
2160 		else if (len >= PATH_MAX)
2161 			return -ENAMETOOLONG;
2162 
2163 		err = bpf_map__unpin(map, buf);
2164 		if (err)
2165 			return err;
2166 	}
2167 
2168 	return 0;
2169 }
2170 
2171 int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
2172 {
2173 	struct bpf_program *prog;
2174 	int err;
2175 
2176 	if (!obj)
2177 		return -ENOENT;
2178 
2179 	if (!obj->loaded) {
2180 		pr_warning("object not yet loaded; load it first\n");
2181 		return -ENOENT;
2182 	}
2183 
2184 	err = make_dir(path);
2185 	if (err)
2186 		return err;
2187 
2188 	bpf_object__for_each_program(prog, obj) {
2189 		char buf[PATH_MAX];
2190 		int len;
2191 
2192 		len = snprintf(buf, PATH_MAX, "%s/%s", path,
2193 			       prog->pin_name);
2194 		if (len < 0) {
2195 			err = -EINVAL;
2196 			goto err_unpin_programs;
2197 		} else if (len >= PATH_MAX) {
2198 			err = -ENAMETOOLONG;
2199 			goto err_unpin_programs;
2200 		}
2201 
2202 		err = bpf_program__pin(prog, buf);
2203 		if (err)
2204 			goto err_unpin_programs;
2205 	}
2206 
2207 	return 0;
2208 
2209 err_unpin_programs:
2210 	while ((prog = bpf_program__prev(prog, obj))) {
2211 		char buf[PATH_MAX];
2212 		int len;
2213 
2214 		len = snprintf(buf, PATH_MAX, "%s/%s", path,
2215 			       prog->pin_name);
2216 		if (len < 0)
2217 			continue;
2218 		else if (len >= PATH_MAX)
2219 			continue;
2220 
2221 		bpf_program__unpin(prog, buf);
2222 	}
2223 
2224 	return err;
2225 }
2226 
2227 int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
2228 {
2229 	struct bpf_program *prog;
2230 	int err;
2231 
2232 	if (!obj)
2233 		return -ENOENT;
2234 
2235 	bpf_object__for_each_program(prog, obj) {
2236 		char buf[PATH_MAX];
2237 		int len;
2238 
2239 		len = snprintf(buf, PATH_MAX, "%s/%s", path,
2240 			       prog->pin_name);
2241 		if (len < 0)
2242 			return -EINVAL;
2243 		else if (len >= PATH_MAX)
2244 			return -ENAMETOOLONG;
2245 
2246 		err = bpf_program__unpin(prog, buf);
2247 		if (err)
2248 			return err;
2249 	}
2250 
2251 	return 0;
2252 }
2253 
2254 int bpf_object__pin(struct bpf_object *obj, const char *path)
2255 {
2256 	int err;
2257 
2258 	err = bpf_object__pin_maps(obj, path);
2259 	if (err)
2260 		return err;
2261 
2262 	err = bpf_object__pin_programs(obj, path);
2263 	if (err) {
2264 		bpf_object__unpin_maps(obj, path);
2265 		return err;
2266 	}
2267 
2268 	return 0;
2269 }
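/*
 * A minimal sketch of pinning a whole object (assuming obj was loaded
 * with bpf_object__load() and the hypothetical path below sits on a
 * bpffs mount; the directory is created if it does not exist).  Maps are
 * pinned first, then programs; if pinning the programs fails, the maps
 * are unpinned again before returning:
 *
 *	err = bpf_object__pin(obj, "/sys/fs/bpf/my_obj");
 *	if (err)
 *		return err;
 */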
2270 
2271 void bpf_object__close(struct bpf_object *obj)
2272 {
2273 	size_t i;
2274 
2275 	if (!obj)
2276 		return;
2277 
2278 	if (obj->clear_priv)
2279 		obj->clear_priv(obj, obj->priv);
2280 
2281 	bpf_object__elf_finish(obj);
2282 	bpf_object__unload(obj);
2283 	btf__free(obj->btf);
2284 	btf_ext__free(obj->btf_ext);
2285 
2286 	for (i = 0; i < obj->nr_maps; i++) {
2287 		zfree(&obj->maps[i].name);
2288 		if (obj->maps[i].clear_priv)
2289 			obj->maps[i].clear_priv(&obj->maps[i],
2290 						obj->maps[i].priv);
2291 		obj->maps[i].priv = NULL;
2292 		obj->maps[i].clear_priv = NULL;
2293 	}
2294 	zfree(&obj->maps);
2295 	obj->nr_maps = 0;
2296 
2297 	if (obj->programs && obj->nr_programs) {
2298 		for (i = 0; i < obj->nr_programs; i++)
2299 			bpf_program__exit(&obj->programs[i]);
2300 	}
2301 	zfree(&obj->programs);
2302 
2303 	list_del(&obj->list);
2304 	free(obj);
2305 }
2306 
2307 struct bpf_object *
2308 bpf_object__next(struct bpf_object *prev)
2309 {
2310 	struct bpf_object *next;
2311 
2312 	if (!prev)
2313 		next = list_first_entry(&bpf_objects_list,
2314 					struct bpf_object,
2315 					list);
2316 	else
2317 		next = list_next_entry(prev, list);
2318 
2319 	/* The empty-list case is detected here, so no check is needed on entry. */
2320 	if (&next->list == &bpf_objects_list)
2321 		return NULL;
2322 
2323 	return next;
2324 }
2325 
2326 const char *bpf_object__name(struct bpf_object *obj)
2327 {
2328 	return obj ? obj->path : ERR_PTR(-EINVAL);
2329 }
2330 
2331 unsigned int bpf_object__kversion(struct bpf_object *obj)
2332 {
2333 	return obj ? obj->kern_version : 0;
2334 }
2335 
2336 struct btf *bpf_object__btf(struct bpf_object *obj)
2337 {
2338 	return obj ? obj->btf : NULL;
2339 }
2340 
2341 int bpf_object__btf_fd(const struct bpf_object *obj)
2342 {
2343 	return obj->btf ? btf__fd(obj->btf) : -1;
2344 }
2345 
2346 int bpf_object__set_priv(struct bpf_object *obj, void *priv,
2347 			 bpf_object_clear_priv_t clear_priv)
2348 {
2349 	if (obj->priv && obj->clear_priv)
2350 		obj->clear_priv(obj, obj->priv);
2351 
2352 	obj->priv = priv;
2353 	obj->clear_priv = clear_priv;
2354 	return 0;
2355 }
2356 
2357 void *bpf_object__priv(struct bpf_object *obj)
2358 {
2359 	return obj ? obj->priv : ERR_PTR(-EINVAL);
2360 }
2361 
2362 static struct bpf_program *
2363 __bpf_program__iter(struct bpf_program *p, struct bpf_object *obj, bool forward)
2364 {
2365 	size_t nr_programs = obj->nr_programs;
2366 	ssize_t idx;
2367 
2368 	if (!nr_programs)
2369 		return NULL;
2370 
2371 	if (!p)
2372 		/* Iter from the beginning */
2373 		return forward ? &obj->programs[0] :
2374 			&obj->programs[nr_programs - 1];
2375 
2376 	if (p->obj != obj) {
2377 		pr_warning("error: program handle doesn't match object\n");
2378 		return NULL;
2379 	}
2380 
2381 	idx = (p - obj->programs) + (forward ? 1 : -1);
2382 	if (idx >= obj->nr_programs || idx < 0)
2383 		return NULL;
2384 	return &obj->programs[idx];
2385 }
2386 
2387 struct bpf_program *
2388 bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
2389 {
2390 	struct bpf_program *prog = prev;
2391 
2392 	do {
2393 		prog = __bpf_program__iter(prog, obj, true);
2394 	} while (prog && bpf_program__is_function_storage(prog, obj));
2395 
2396 	return prog;
2397 }
2398 
2399 struct bpf_program *
2400 bpf_program__prev(struct bpf_program *next, struct bpf_object *obj)
2401 {
2402 	struct bpf_program *prog = next;
2403 
2404 	do {
2405 		prog = __bpf_program__iter(prog, obj, false);
2406 	} while (prog && bpf_program__is_function_storage(prog, obj));
2407 
2408 	return prog;
2409 }
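/*
 * These iterators back the bpf_object__for_each_program() helper in
 * libbpf.h.  A minimal sketch of walking the programs of an object
 * (obj is a hypothetical, already-opened object):
 *
 *	struct bpf_program *pos;
 *
 *	bpf_object__for_each_program(pos, obj)
 *		printf("section: %s\n", bpf_program__title(pos, false));
 */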
2410 
2411 int bpf_program__set_priv(struct bpf_program *prog, void *priv,
2412 			  bpf_program_clear_priv_t clear_priv)
2413 {
2414 	if (prog->priv && prog->clear_priv)
2415 		prog->clear_priv(prog, prog->priv);
2416 
2417 	prog->priv = priv;
2418 	prog->clear_priv = clear_priv;
2419 	return 0;
2420 }
2421 
2422 void *bpf_program__priv(struct bpf_program *prog)
2423 {
2424 	return prog ? prog->priv : ERR_PTR(-EINVAL);
2425 }
2426 
2427 void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
2428 {
2429 	prog->prog_ifindex = ifindex;
2430 }
2431 
2432 const char *bpf_program__title(struct bpf_program *prog, bool needs_copy)
2433 {
2434 	const char *title;
2435 
2436 	title = prog->section_name;
2437 	if (needs_copy) {
2438 		title = strdup(title);
2439 		if (!title) {
2440 			pr_warning("failed to strdup program title\n");
2441 			return ERR_PTR(-ENOMEM);
2442 		}
2443 	}
2444 
2445 	return title;
2446 }
2447 
2448 int bpf_program__fd(struct bpf_program *prog)
2449 {
2450 	return bpf_program__nth_fd(prog, 0);
2451 }
2452 
2453 int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
2454 			  bpf_program_prep_t prep)
2455 {
2456 	int *instances_fds;
2457 
2458 	if (nr_instances <= 0 || !prep)
2459 		return -EINVAL;
2460 
2461 	if (prog->instances.nr > 0 || prog->instances.fds) {
2462 		pr_warning("Can't set pre-processor after loading\n");
2463 		return -EINVAL;
2464 	}
2465 
2466 	instances_fds = malloc(sizeof(int) * nr_instances);
2467 	if (!instances_fds) {
2468 		pr_warning("failed to allocate memory for fds\n");
2469 		return -ENOMEM;
2470 	}
2471 
2472 	/* fill all fds with -1 */
2473 	memset(instances_fds, -1, sizeof(int) * nr_instances);
2474 
2475 	prog->instances.nr = nr_instances;
2476 	prog->instances.fds = instances_fds;
2477 	prog->preprocessor = prep;
2478 	return 0;
2479 }
2480 
2481 int bpf_program__nth_fd(struct bpf_program *prog, int n)
2482 {
2483 	int fd;
2484 
2485 	if (!prog)
2486 		return -EINVAL;
2487 
2488 	if (n >= prog->instances.nr || n < 0) {
2489 		pr_warning("Can't get the %dth fd from program %s: only %d instances\n",
2490 			   n, prog->section_name, prog->instances.nr);
2491 		return -EINVAL;
2492 	}
2493 
2494 	fd = prog->instances.fds[n];
2495 	if (fd < 0) {
2496 		pr_warning("%dth instance of program '%s' is invalid\n",
2497 			   n, prog->section_name);
2498 		return -ENOENT;
2499 	}
2500 
2501 	return fd;
2502 }
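/*
 * A minimal multi-instance sketch, assuming the bpf_program_prep_t and
 * struct bpf_prog_prep_result definitions from libbpf.h.  The callback
 * below simply reuses the original instructions for every instance; a
 * real preprocessor would rewrite them per instance index n:
 *
 *	static int prep_fn(struct bpf_program *prog, int n,
 *			   struct bpf_insn *insns, int insns_cnt,
 *			   struct bpf_prog_prep_result *res)
 *	{
 *		res->new_insn_ptr = insns;
 *		res->new_insn_cnt = insns_cnt;
 *		return 0;
 *	}
 *
 *	err = bpf_program__set_prep(prog, 2, prep_fn);	// before bpf_object__load()
 *	...
 *	fd = bpf_program__nth_fd(prog, 1);		// after load
 */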
2503 
2504 void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
2505 {
2506 	prog->type = type;
2507 }
2508 
2509 static bool bpf_program__is_type(struct bpf_program *prog,
2510 				 enum bpf_prog_type type)
2511 {
2512 	return prog ? (prog->type == type) : false;
2513 }
2514 
2515 #define BPF_PROG_TYPE_FNS(NAME, TYPE)			\
2516 int bpf_program__set_##NAME(struct bpf_program *prog)	\
2517 {							\
2518 	if (!prog)					\
2519 		return -EINVAL;				\
2520 	bpf_program__set_type(prog, TYPE);		\
2521 	return 0;					\
2522 }							\
2523 							\
2524 bool bpf_program__is_##NAME(struct bpf_program *prog)	\
2525 {							\
2526 	return bpf_program__is_type(prog, TYPE);	\
2527 }							\
2528 
2529 BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
2530 BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
2531 BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
2532 BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
2533 BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
2534 BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
2535 BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
2536 BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
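/*
 * Each BPF_PROG_TYPE_FNS() instantiation above expands to a setter and a
 * predicate.  A minimal sketch for the xdp pair (prog is a hypothetical
 * handle obtained from an opened object):
 *
 *	err = bpf_program__set_xdp(prog);	// bpf_program__set_type(prog, BPF_PROG_TYPE_XDP)
 *	if (bpf_program__is_xdp(prog))
 *		...
 */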
2537 
2538 void bpf_program__set_expected_attach_type(struct bpf_program *prog,
2539 					   enum bpf_attach_type type)
2540 {
2541 	prog->expected_attach_type = type;
2542 }
2543 
2544 #define BPF_PROG_SEC_IMPL(string, ptype, eatype, is_attachable, atype) \
2545 	{ string, sizeof(string) - 1, ptype, eatype, is_attachable, atype }
2546 
2547 /* Programs that can NOT be attached. */
2548 #define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0)
2549 
2550 /* Programs that can be attached. */
2551 #define BPF_APROG_SEC(string, ptype, atype) \
2552 	BPF_PROG_SEC_IMPL(string, ptype, 0, 1, atype)
2553 
2554 /* Programs that must specify expected attach type at load time. */
2555 #define BPF_EAPROG_SEC(string, ptype, eatype) \
2556 	BPF_PROG_SEC_IMPL(string, ptype, eatype, 1, eatype)
2557 
2558 /* Programs that can be attached but attach type can't be identified by section
2559  * name. Kept for backward compatibility.
2560  */
2561 #define BPF_APROG_COMPAT(string, ptype) BPF_PROG_SEC(string, ptype)
2562 
2563 static const struct {
2564 	const char *sec;
2565 	size_t len;
2566 	enum bpf_prog_type prog_type;
2567 	enum bpf_attach_type expected_attach_type;
2568 	int is_attachable;
2569 	enum bpf_attach_type attach_type;
2570 } section_names[] = {
2571 	BPF_PROG_SEC("socket",			BPF_PROG_TYPE_SOCKET_FILTER),
2572 	BPF_PROG_SEC("kprobe/",			BPF_PROG_TYPE_KPROBE),
2573 	BPF_PROG_SEC("kretprobe/",		BPF_PROG_TYPE_KPROBE),
2574 	BPF_PROG_SEC("classifier",		BPF_PROG_TYPE_SCHED_CLS),
2575 	BPF_PROG_SEC("action",			BPF_PROG_TYPE_SCHED_ACT),
2576 	BPF_PROG_SEC("tracepoint/",		BPF_PROG_TYPE_TRACEPOINT),
2577 	BPF_PROG_SEC("raw_tracepoint/",		BPF_PROG_TYPE_RAW_TRACEPOINT),
2578 	BPF_PROG_SEC("xdp",			BPF_PROG_TYPE_XDP),
2579 	BPF_PROG_SEC("perf_event",		BPF_PROG_TYPE_PERF_EVENT),
2580 	BPF_PROG_SEC("lwt_in",			BPF_PROG_TYPE_LWT_IN),
2581 	BPF_PROG_SEC("lwt_out",			BPF_PROG_TYPE_LWT_OUT),
2582 	BPF_PROG_SEC("lwt_xmit",		BPF_PROG_TYPE_LWT_XMIT),
2583 	BPF_PROG_SEC("lwt_seg6local",		BPF_PROG_TYPE_LWT_SEG6LOCAL),
2584 	BPF_APROG_SEC("cgroup_skb/ingress",	BPF_PROG_TYPE_CGROUP_SKB,
2585 						BPF_CGROUP_INET_INGRESS),
2586 	BPF_APROG_SEC("cgroup_skb/egress",	BPF_PROG_TYPE_CGROUP_SKB,
2587 						BPF_CGROUP_INET_EGRESS),
2588 	BPF_APROG_COMPAT("cgroup/skb",		BPF_PROG_TYPE_CGROUP_SKB),
2589 	BPF_APROG_SEC("cgroup/sock",		BPF_PROG_TYPE_CGROUP_SOCK,
2590 						BPF_CGROUP_INET_SOCK_CREATE),
2591 	BPF_EAPROG_SEC("cgroup/post_bind4",	BPF_PROG_TYPE_CGROUP_SOCK,
2592 						BPF_CGROUP_INET4_POST_BIND),
2593 	BPF_EAPROG_SEC("cgroup/post_bind6",	BPF_PROG_TYPE_CGROUP_SOCK,
2594 						BPF_CGROUP_INET6_POST_BIND),
2595 	BPF_APROG_SEC("cgroup/dev",		BPF_PROG_TYPE_CGROUP_DEVICE,
2596 						BPF_CGROUP_DEVICE),
2597 	BPF_APROG_SEC("sockops",		BPF_PROG_TYPE_SOCK_OPS,
2598 						BPF_CGROUP_SOCK_OPS),
2599 	BPF_APROG_SEC("sk_skb/stream_parser",	BPF_PROG_TYPE_SK_SKB,
2600 						BPF_SK_SKB_STREAM_PARSER),
2601 	BPF_APROG_SEC("sk_skb/stream_verdict",	BPF_PROG_TYPE_SK_SKB,
2602 						BPF_SK_SKB_STREAM_VERDICT),
2603 	BPF_APROG_COMPAT("sk_skb",		BPF_PROG_TYPE_SK_SKB),
2604 	BPF_APROG_SEC("sk_msg",			BPF_PROG_TYPE_SK_MSG,
2605 						BPF_SK_MSG_VERDICT),
2606 	BPF_APROG_SEC("lirc_mode2",		BPF_PROG_TYPE_LIRC_MODE2,
2607 						BPF_LIRC_MODE2),
2608 	BPF_APROG_SEC("flow_dissector",		BPF_PROG_TYPE_FLOW_DISSECTOR,
2609 						BPF_FLOW_DISSECTOR),
2610 	BPF_EAPROG_SEC("cgroup/bind4",		BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
2611 						BPF_CGROUP_INET4_BIND),
2612 	BPF_EAPROG_SEC("cgroup/bind6",		BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
2613 						BPF_CGROUP_INET6_BIND),
2614 	BPF_EAPROG_SEC("cgroup/connect4",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
2615 						BPF_CGROUP_INET4_CONNECT),
2616 	BPF_EAPROG_SEC("cgroup/connect6",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
2617 						BPF_CGROUP_INET6_CONNECT),
2618 	BPF_EAPROG_SEC("cgroup/sendmsg4",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
2619 						BPF_CGROUP_UDP4_SENDMSG),
2620 	BPF_EAPROG_SEC("cgroup/sendmsg6",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
2621 						BPF_CGROUP_UDP6_SENDMSG),
2622 };
2623 
2624 #undef BPF_PROG_SEC_IMPL
2625 #undef BPF_PROG_SEC
2626 #undef BPF_APROG_SEC
2627 #undef BPF_EAPROG_SEC
2628 #undef BPF_APROG_COMPAT
2629 
2630 #define MAX_TYPE_NAME_SIZE 32
2631 
2632 static char *libbpf_get_type_names(bool attach_type)
2633 {
2634 	int i, len = ARRAY_SIZE(section_names) * MAX_TYPE_NAME_SIZE;
2635 	char *buf;
2636 
2637 	buf = malloc(len);
2638 	if (!buf)
2639 		return NULL;
2640 
2641 	buf[0] = '\0';
2642 	/* Build a string buf with all available names */
2643 	for (i = 0; i < ARRAY_SIZE(section_names); i++) {
2644 		if (attach_type && !section_names[i].is_attachable)
2645 			continue;
2646 
2647 		if (strlen(buf) + strlen(section_names[i].sec) + 2 > len) {
2648 			free(buf);
2649 			return NULL;
2650 		}
2651 		strcat(buf, " ");
2652 		strcat(buf, section_names[i].sec);
2653 	}
2654 
2655 	return buf;
2656 }
2657 
2658 int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
2659 			     enum bpf_attach_type *expected_attach_type)
2660 {
2661 	char *type_names;
2662 	int i;
2663 
2664 	if (!name)
2665 		return -EINVAL;
2666 
2667 	for (i = 0; i < ARRAY_SIZE(section_names); i++) {
2668 		if (strncmp(name, section_names[i].sec, section_names[i].len))
2669 			continue;
2670 		*prog_type = section_names[i].prog_type;
2671 		*expected_attach_type = section_names[i].expected_attach_type;
2672 		return 0;
2673 	}
2674 	pr_warning("failed to guess program type based on ELF section name '%s'\n", name);
2675 	type_names = libbpf_get_type_names(false);
2676 	if (type_names != NULL) {
2677 		pr_info("supported section(type) names are:%s\n", type_names);
2678 		free(type_names);
2679 	}
2680 
2681 	return -EINVAL;
2682 }
2683 
2684 int libbpf_attach_type_by_name(const char *name,
2685 			       enum bpf_attach_type *attach_type)
2686 {
2687 	char *type_names;
2688 	int i;
2689 
2690 	if (!name)
2691 		return -EINVAL;
2692 
2693 	for (i = 0; i < ARRAY_SIZE(section_names); i++) {
2694 		if (strncmp(name, section_names[i].sec, section_names[i].len))
2695 			continue;
2696 		if (!section_names[i].is_attachable)
2697 			return -EINVAL;
2698 		*attach_type = section_names[i].attach_type;
2699 		return 0;
2700 	}
2701 	pr_warning("failed to guess attach type based on ELF section name '%s'\n", name);
2702 	type_names = libbpf_get_type_names(true);
2703 	if (type_names != NULL) {
2704 		pr_info("attachable section(type) names are:%s\n", type_names);
2705 		free(type_names);
2706 	}
2707 
2708 	return -EINVAL;
2709 }
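/*
 * A minimal sketch of resolving an ELF section name with both helpers,
 * using "cgroup_skb/ingress" from section_names[] above:
 *
 *	enum bpf_prog_type prog_type;
 *	enum bpf_attach_type expected, attach;
 *	int err;
 *
 *	err = libbpf_prog_type_by_name("cgroup_skb/ingress", &prog_type,
 *				       &expected);
 *	if (!err)
 *		err = libbpf_attach_type_by_name("cgroup_skb/ingress",
 *						 &attach);
 */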
2710 
2711 static int
2712 bpf_program__identify_section(struct bpf_program *prog,
2713 			      enum bpf_prog_type *prog_type,
2714 			      enum bpf_attach_type *expected_attach_type)
2715 {
2716 	return libbpf_prog_type_by_name(prog->section_name, prog_type,
2717 					expected_attach_type);
2718 }
2719 
2720 int bpf_map__fd(struct bpf_map *map)
2721 {
2722 	return map ? map->fd : -EINVAL;
2723 }
2724 
2725 const struct bpf_map_def *bpf_map__def(struct bpf_map *map)
2726 {
2727 	return map ? &map->def : ERR_PTR(-EINVAL);
2728 }
2729 
2730 const char *bpf_map__name(struct bpf_map *map)
2731 {
2732 	return map ? map->name : NULL;
2733 }
2734 
2735 __u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
2736 {
2737 	return map ? map->btf_key_type_id : 0;
2738 }
2739 
2740 __u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
2741 {
2742 	return map ? map->btf_value_type_id : 0;
2743 }
2744 
2745 int bpf_map__set_priv(struct bpf_map *map, void *priv,
2746 		     bpf_map_clear_priv_t clear_priv)
2747 {
2748 	if (!map)
2749 		return -EINVAL;
2750 
2751 	if (map->priv) {
2752 		if (map->clear_priv)
2753 			map->clear_priv(map, map->priv);
2754 	}
2755 
2756 	map->priv = priv;
2757 	map->clear_priv = clear_priv;
2758 	return 0;
2759 }
2760 
2761 void *bpf_map__priv(struct bpf_map *map)
2762 {
2763 	return map ? map->priv : ERR_PTR(-EINVAL);
2764 }
2765 
2766 bool bpf_map__is_offload_neutral(struct bpf_map *map)
2767 {
2768 	return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
2769 }
2770 
2771 void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
2772 {
2773 	map->map_ifindex = ifindex;
2774 }
2775 
2776 int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
2777 {
2778 	if (!bpf_map_type__is_map_in_map(map->def.type)) {
2779 		pr_warning("error: unsupported map type\n");
2780 		return -EINVAL;
2781 	}
2782 	if (map->inner_map_fd != -1) {
2783 		pr_warning("error: inner_map_fd already specified\n");
2784 		return -EINVAL;
2785 	}
2786 	map->inner_map_fd = fd;
2787 	return 0;
2788 }
2789 
2790 static struct bpf_map *
2791 __bpf_map__iter(struct bpf_map *m, struct bpf_object *obj, int i)
2792 {
2793 	ssize_t idx;
2794 	struct bpf_map *s, *e;
2795 
2796 	if (!obj || !obj->maps)
2797 		return NULL;
2798 
2799 	s = obj->maps;
2800 	e = obj->maps + obj->nr_maps;
2801 
2802 	if ((m < s) || (m >= e)) {
2803 		pr_warning("error in %s: map handle doesn't belong to object\n",
2804 			   __func__);
2805 		return NULL;
2806 	}
2807 
2808 	idx = (m - obj->maps) + i;
2809 	if (idx >= obj->nr_maps || idx < 0)
2810 		return NULL;
2811 	return &obj->maps[idx];
2812 }
2813 
2814 struct bpf_map *
2815 bpf_map__next(struct bpf_map *prev, struct bpf_object *obj)
2816 {
2817 	if (prev == NULL)
2818 		return obj->maps;
2819 
2820 	return __bpf_map__iter(prev, obj, 1);
2821 }
2822 
2823 struct bpf_map *
2824 bpf_map__prev(struct bpf_map *next, struct bpf_object *obj)
2825 {
2826 	if (next == NULL) {
2827 		if (!obj->nr_maps)
2828 			return NULL;
2829 		return obj->maps + obj->nr_maps - 1;
2830 	}
2831 
2832 	return __bpf_map__iter(next, obj, -1);
2833 }
2834 
2835 struct bpf_map *
2836 bpf_object__find_map_by_name(struct bpf_object *obj, const char *name)
2837 {
2838 	struct bpf_map *pos;
2839 
2840 	bpf_object__for_each_map(pos, obj) {
2841 		if (pos->name && !strcmp(pos->name, name))
2842 			return pos;
2843 	}
2844 	return NULL;
2845 }
2846 
2847 int
2848 bpf_object__find_map_fd_by_name(struct bpf_object *obj, const char *name)
2849 {
2850 	return bpf_map__fd(bpf_object__find_map_by_name(obj, name));
2851 }
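/*
 * A minimal lookup sketch, assuming a loaded object containing a map
 * named "my_map" (hypothetical) whose key and value are both 4 bytes,
 * and using bpf_map_update_elem() from bpf.h:
 *
 *	__u32 key = 0, value = 1;
 *	int map_fd;
 *
 *	map_fd = bpf_object__find_map_fd_by_name(obj, "my_map");
 *	if (map_fd >= 0)
 *		err = bpf_map_update_elem(map_fd, &key, &value, BPF_ANY);
 */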
2852 
2853 struct bpf_map *
2854 bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
2855 {
2856 	int i;
2857 
2858 	for (i = 0; i < obj->nr_maps; i++) {
2859 		if (obj->maps[i].offset == offset)
2860 			return &obj->maps[i];
2861 	}
2862 	return ERR_PTR(-ENOENT);
2863 }
2864 
2865 long libbpf_get_error(const void *ptr)
2866 {
2867 	if (IS_ERR(ptr))
2868 		return PTR_ERR(ptr);
2869 	return 0;
2870 }
2871 
2872 int bpf_prog_load(const char *file, enum bpf_prog_type type,
2873 		  struct bpf_object **pobj, int *prog_fd)
2874 {
2875 	struct bpf_prog_load_attr attr;
2876 
2877 	memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
2878 	attr.file = file;
2879 	attr.prog_type = type;
2880 	attr.expected_attach_type = 0;
2881 
2882 	return bpf_prog_load_xattr(&attr, pobj, prog_fd);
2883 }
2884 
2885 int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
2886 			struct bpf_object **pobj, int *prog_fd)
2887 {
2888 	struct bpf_object_open_attr open_attr = {
2889 		.file		= attr->file,
2890 		.prog_type	= attr->prog_type,
2891 	};
2892 	struct bpf_program *prog, *first_prog = NULL;
2893 	enum bpf_attach_type expected_attach_type;
2894 	enum bpf_prog_type prog_type;
2895 	struct bpf_object *obj;
2896 	struct bpf_map *map;
2897 	int err;
2898 
2899 	if (!attr)
2900 		return -EINVAL;
2901 	if (!attr->file)
2902 		return -EINVAL;
2903 
2904 	obj = bpf_object__open_xattr(&open_attr);
2905 	if (IS_ERR_OR_NULL(obj))
2906 		return -ENOENT;
2907 
2908 	bpf_object__for_each_program(prog, obj) {
2909 		/*
2910 		 * If type is not specified, try to guess it based on
2911 		 * section name.
2912 		 */
2913 		prog_type = attr->prog_type;
2914 		prog->prog_ifindex = attr->ifindex;
2915 		expected_attach_type = attr->expected_attach_type;
2916 		if (prog_type == BPF_PROG_TYPE_UNSPEC) {
2917 			err = bpf_program__identify_section(prog, &prog_type,
2918 							    &expected_attach_type);
2919 			if (err < 0) {
2920 				bpf_object__close(obj);
2921 				return -EINVAL;
2922 			}
2923 		}
2924 
2925 		bpf_program__set_type(prog, prog_type);
2926 		bpf_program__set_expected_attach_type(prog,
2927 						      expected_attach_type);
2928 
2929 		if (!first_prog)
2930 			first_prog = prog;
2931 	}
2932 
2933 	bpf_object__for_each_map(map, obj) {
2934 		if (!bpf_map__is_offload_neutral(map))
2935 			map->map_ifindex = attr->ifindex;
2936 	}
2937 
2938 	if (!first_prog) {
2939 		pr_warning("object file doesn't contain a bpf program\n");
2940 		bpf_object__close(obj);
2941 		return -ENOENT;
2942 	}
2943 
2944 	err = bpf_object__load(obj);
2945 	if (err) {
2946 		bpf_object__close(obj);
2947 		return -EINVAL;
2948 	}
2949 
2950 	*pobj = obj;
2951 	*prog_fd = bpf_program__fd(first_prog);
2952 	return 0;
2953 }
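/*
 * A minimal sketch of the convenience loader, assuming a hypothetical
 * object file "xdp_prog.o".  Passing BPF_PROG_TYPE_UNSPEC instead would
 * let the type be guessed from the section names:
 *
 *	struct bpf_object *obj;
 *	int prog_fd, err;
 *
 *	err = bpf_prog_load("xdp_prog.o", BPF_PROG_TYPE_XDP, &obj, &prog_fd);
 *	if (err)
 *		return err;
 *	// prog_fd is the fd of the first program in the object
 */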
2954 
2955 enum bpf_perf_event_ret
2956 bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
2957 			   void **copy_mem, size_t *copy_size,
2958 			   bpf_perf_event_print_t fn, void *private_data)
2959 {
2960 	struct perf_event_mmap_page *header = mmap_mem;
2961 	__u64 data_head = ring_buffer_read_head(header);
2962 	__u64 data_tail = header->data_tail;
2963 	void *base = ((__u8 *)header) + page_size;
2964 	int ret = LIBBPF_PERF_EVENT_CONT;
2965 	struct perf_event_header *ehdr;
2966 	size_t ehdr_size;
2967 
2968 	while (data_head != data_tail) {
2969 		ehdr = base + (data_tail & (mmap_size - 1));
2970 		ehdr_size = ehdr->size;
2971 
2972 		if (((void *)ehdr) + ehdr_size > base + mmap_size) {
2973 			void *copy_start = ehdr;
2974 			size_t len_first = base + mmap_size - copy_start;
2975 			size_t len_secnd = ehdr_size - len_first;
2976 
2977 			if (*copy_size < ehdr_size) {
2978 				free(*copy_mem);
2979 				*copy_mem = malloc(ehdr_size);
2980 				if (!*copy_mem) {
2981 					*copy_size = 0;
2982 					ret = LIBBPF_PERF_EVENT_ERROR;
2983 					break;
2984 				}
2985 				*copy_size = ehdr_size;
2986 			}
2987 
2988 			memcpy(*copy_mem, copy_start, len_first);
2989 			memcpy(*copy_mem + len_first, base, len_secnd);
2990 			ehdr = *copy_mem;
2991 		}
2992 
2993 		ret = fn(ehdr, private_data);
2994 		data_tail += ehdr_size;
2995 		if (ret != LIBBPF_PERF_EVENT_CONT)
2996 			break;
2997 	}
2998 
2999 	ring_buffer_write_tail(header, data_tail);
3000 	return ret;
3001 }
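/*
 * A minimal consumer sketch, assuming a perf event ring already mapped
 * with mmap() (mem, size and page_size describe that mapping, copy_mem
 * starts out NULL and copy_size 0) and a callback matching the
 * bpf_perf_event_print_t typedef from libbpf.h:
 *
 *	static enum bpf_perf_event_ret
 *	print_fn(struct perf_event_header *hdr, void *private_data)
 *	{
 *		// handle one record, e.g. hdr->type == PERF_RECORD_SAMPLE
 *		return LIBBPF_PERF_EVENT_CONT;
 *	}
 *
 *	ret = bpf_perf_event_read_simple(mem, size, page_size,
 *					 &copy_mem, &copy_size,
 *					 print_fn, NULL);
 */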
3002