xref: /openbmc/linux/tools/lib/bpf/libbpf.c (revision 71844fac)
1 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
2 
3 /*
4  * Common eBPF ELF object loading operations.
5  *
6  * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
7  * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
8  * Copyright (C) 2015 Huawei Inc.
9  * Copyright (C) 2017 Nicira, Inc.
10  */
11 
12 #ifndef _GNU_SOURCE
13 #define _GNU_SOURCE
14 #endif
15 #include <stdlib.h>
16 #include <stdio.h>
17 #include <stdarg.h>
18 #include <libgen.h>
19 #include <inttypes.h>
20 #include <string.h>
21 #include <unistd.h>
22 #include <fcntl.h>
23 #include <errno.h>
24 #include <asm/unistd.h>
25 #include <linux/err.h>
26 #include <linux/kernel.h>
27 #include <linux/bpf.h>
28 #include <linux/btf.h>
29 #include <linux/filter.h>
30 #include <linux/list.h>
31 #include <linux/limits.h>
32 #include <linux/perf_event.h>
33 #include <linux/ring_buffer.h>
34 #include <sys/stat.h>
35 #include <sys/types.h>
36 #include <sys/vfs.h>
37 #include <tools/libc_compat.h>
38 #include <libelf.h>
39 #include <gelf.h>
40 
41 #include "libbpf.h"
42 #include "bpf.h"
43 #include "btf.h"
44 #include "str_error.h"
45 
/* ELF machine type for eBPF objects; defined locally for old elf.h. */
#ifndef EM_BPF
#define EM_BPF 247
#endif

/* Superblock magic of the bpf filesystem (matches kernel headers). */
#ifndef BPF_FS_MAGIC
#define BPF_FS_MAGIC		0xcafe4a11
#endif

/* Shorthand for printf-style format checking on variadic functions. */
#define __printf(a, b)	__attribute__((format(printf, a, b)))
55 
/*
 * Default print callback: forward the formatted message to stderr.
 * Returns vfprintf()'s result (characters written, or negative on
 * error).
 */
__attribute__((format(printf, 1, 2)))
static int __base_pr(const char *format, ...)
{
	va_list ap;
	int ret;

	va_start(ap, format);
	ret = vfprintf(stderr, format, ap);
	va_end(ap);

	return ret;
}
67 
/* Active print callbacks. Warning and info default to stderr via
 * __base_pr; debug output is off (NULL) until enabled by the user
 * through libbpf_set_print().
 */
static __printf(1, 2) libbpf_print_fn_t __pr_warning = __base_pr;
static __printf(1, 2) libbpf_print_fn_t __pr_info = __base_pr;
static __printf(1, 2) libbpf_print_fn_t __pr_debug;

/* Invoke a print callback only if it is set, prefixing "libbpf: ". */
#define __pr(func, fmt, ...)	\
do {				\
	if ((func))		\
		(func)("libbpf: " fmt, ##__VA_ARGS__); \
} while (0)

#define pr_warning(fmt, ...)	__pr(__pr_warning, fmt, ##__VA_ARGS__)
#define pr_info(fmt, ...)	__pr(__pr_info, fmt, ##__VA_ARGS__)
#define pr_debug(fmt, ...)	__pr(__pr_debug, fmt, ##__VA_ARGS__)
81 
82 void libbpf_set_print(libbpf_print_fn_t warn,
83 		      libbpf_print_fn_t info,
84 		      libbpf_print_fn_t debug)
85 {
86 	__pr_warning = warn;
87 	__pr_info = info;
88 	__pr_debug = debug;
89 }
90 
/* Size of scratch buffers passed to libbpf_strerror_r(). */
#define STRERR_BUFSIZE  128

/* Run 'action' and jump to label 'out' on a non-zero result. */
#define CHECK_ERR(action, err, out) do {	\
	err = action;			\
	if (err)			\
		goto out;		\
} while(0)


/* Copied from tools/perf/util/util.h */
/* Free *ptr and reset it to NULL (defends against double free). */
#ifndef zfree
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
#endif

/* Close fd only if it is valid (>= 0), then reset it to -1.
 * Evaluates to close()'s return value, or 0 if fd was invalid.
 */
#ifndef zclose
# define zclose(fd) ({			\
	int ___err = 0;			\
	if ((fd) >= 0)			\
		___err = close((fd));	\
	fd = -1;			\
	___err; })
#endif

/* Prefer mmap-backed ELF reading when libelf supports it. */
#ifdef HAVE_LIBELF_MMAP_SUPPORT
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP
#else
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
#endif
119 
/* Kernel feature flags probed at load time (see bpf_object__probe_caps). */
struct bpf_capabilities {
	/* v4.14: kernel support for program & map names. */
	__u32 name:1;
};
124 
/*
 * bpf_prog should be a better name but it has been used in
 * linux/filter.h.
 */
struct bpf_program {
	/* Index in elf obj file, for relocation use. */
	int idx;
	/* Program name: first global symbol in its section, or ".text"
	 * (see bpf_object__init_prog_names).
	 */
	char *name;
	int prog_ifindex;
	/* Name of the ELF section the program was read from. */
	char *section_name;
	/* section_name with / replaced by _; makes recursive pinning
	 * in bpf_object__pin_programs easier
	 */
	char *pin_name;
	/* Private copy of the section's instructions. */
	struct bpf_insn *insns;
	size_t insns_cnt, main_prog_cnt;
	enum bpf_prog_type type;

	/* One record per ELF relocation entry, filled in
	 * bpf_program__collect_reloc().
	 */
	struct reloc_desc {
		enum {
			RELO_LD64,	/* map reference (BPF_LD|BPF_IMM|BPF_DW) */
			RELO_CALL,	/* bpf-to-bpf pseudo call */
		} type;
		int insn_idx;
		union {
			int map_idx;	/* RELO_LD64: index into obj->maps */
			int text_off;	/* RELO_CALL: symbol value in .text */
		};
	} *reloc_desc;
	int nr_reloc;

	/* fds of loaded kernel instances; nr == -1 means never loaded. */
	struct {
		int nr;
		int *fds;
	} instances;
	bpf_program_prep_t preprocessor;

	/* Back pointer to the owning object. */
	struct bpf_object *obj;
	/* Opaque user data and its destructor hook. */
	void *priv;
	bpf_program_clear_priv_t clear_priv;

	enum bpf_attach_type expected_attach_type;
	/* BTF state used at load time; btf_fd is -1 when absent. */
	int btf_fd;
	void *func_info;
	__u32 func_info_rec_size;
	__u32 func_info_len;

	/* Shared capability flags owned by the enclosing bpf_object. */
	struct bpf_capabilities *caps;
};
174 
/* In-memory representation of one map from the "maps" ELF section. */
struct bpf_map {
	int fd;			/* kernel map fd; -1 until created */
	char *name;		/* heap-allocated copy of the symbol name */
	size_t offset;		/* symbol offset within the "maps" section */
	int map_ifindex;
	int inner_map_fd;	/* -1 unless set for map-in-map types */
	struct bpf_map_def def;
	__u32 btf_key_type_id;	/* resolved from ____btf_map_<name> struct */
	__u32 btf_value_type_id;
	void *priv;		/* opaque user data and its destructor */
	bpf_map_clear_priv_t clear_priv;
};
187 
/* Global list of all live bpf_objects (linked via obj->list). */
static LIST_HEAD(bpf_objects_list);

struct bpf_object {
	char license[64];	/* NUL-terminated "license" section text */
	__u32 kern_version;	/* from the "version" section, else 0 */

	struct bpf_program *programs;
	size_t nr_programs;
	struct bpf_map *maps;
	size_t nr_maps;

	bool loaded;
	bool has_pseudo_calls;	/* any RELO_CALL relocation was seen */

	/*
	 * Information when doing elf related work. Only valid if fd
	 * is valid.
	 */
	struct {
		int fd;
		void *obj_buf;		/* caller-owned in-memory ELF image */
		size_t obj_buf_sz;
		Elf *elf;
		GElf_Ehdr ehdr;
		Elf_Data *symbols;	/* SYMTAB section data (libelf-owned) */
		size_t strtabidx;
		struct {
			GElf_Shdr shdr;
			Elf_Data *data;
		} *reloc;		/* saved SHT_REL sections */
		int nr_reloc;
		int maps_shndx;		/* "maps" section index, -1 if none */
		int text_shndx;		/* ".text" section index */
	} efile;
	/*
	 * All loaded bpf_object is linked in a list, which is
	 * hidden to caller. bpf_objects__<func> handlers deal with
	 * all objects.
	 */
	struct list_head list;

	struct btf *btf;	/* parsed .BTF section, NULL when absent */
	struct btf_ext *btf_ext;

	void *priv;		/* opaque user data and its destructor */
	bpf_object_clear_priv_t clear_priv;

	struct bpf_capabilities caps;	/* probed kernel features */

	char path[];		/* object file path (flexible array member) */
};
/* ELF state is initialized iff the libelf handle is set. */
#define obj_elf_valid(o)	((o)->efile.elf)
240 
241 void bpf_program__unload(struct bpf_program *prog)
242 {
243 	int i;
244 
245 	if (!prog)
246 		return;
247 
248 	/*
249 	 * If the object is opened but the program was never loaded,
250 	 * it is possible that prog->instances.nr == -1.
251 	 */
252 	if (prog->instances.nr > 0) {
253 		for (i = 0; i < prog->instances.nr; i++)
254 			zclose(prog->instances.fds[i]);
255 	} else if (prog->instances.nr != -1) {
256 		pr_warning("Internal error: instances.nr is %d\n",
257 			   prog->instances.nr);
258 	}
259 
260 	prog->instances.nr = -1;
261 	zfree(&prog->instances.fds);
262 
263 	zclose(prog->btf_fd);
264 	zfree(&prog->func_info);
265 }
266 
267 static void bpf_program__exit(struct bpf_program *prog)
268 {
269 	if (!prog)
270 		return;
271 
272 	if (prog->clear_priv)
273 		prog->clear_priv(prog, prog->priv);
274 
275 	prog->priv = NULL;
276 	prog->clear_priv = NULL;
277 
278 	bpf_program__unload(prog);
279 	zfree(&prog->name);
280 	zfree(&prog->section_name);
281 	zfree(&prog->pin_name);
282 	zfree(&prog->insns);
283 	zfree(&prog->reloc_desc);
284 
285 	prog->nr_reloc = 0;
286 	prog->insns_cnt = 0;
287 	prog->idx = -1;
288 }
289 
290 static char *__bpf_program__pin_name(struct bpf_program *prog)
291 {
292 	char *name, *p;
293 
294 	name = p = strdup(prog->section_name);
295 	while ((p = strchr(p, '/')))
296 		*p = '_';
297 
298 	return name;
299 }
300 
301 static int
302 bpf_program__init(void *data, size_t size, char *section_name, int idx,
303 		  struct bpf_program *prog)
304 {
305 	if (size < sizeof(struct bpf_insn)) {
306 		pr_warning("corrupted section '%s'\n", section_name);
307 		return -EINVAL;
308 	}
309 
310 	bzero(prog, sizeof(*prog));
311 
312 	prog->section_name = strdup(section_name);
313 	if (!prog->section_name) {
314 		pr_warning("failed to alloc name for prog under section(%d) %s\n",
315 			   idx, section_name);
316 		goto errout;
317 	}
318 
319 	prog->pin_name = __bpf_program__pin_name(prog);
320 	if (!prog->pin_name) {
321 		pr_warning("failed to alloc pin name for prog under section(%d) %s\n",
322 			   idx, section_name);
323 		goto errout;
324 	}
325 
326 	prog->insns = malloc(size);
327 	if (!prog->insns) {
328 		pr_warning("failed to alloc insns for prog under section %s\n",
329 			   section_name);
330 		goto errout;
331 	}
332 	prog->insns_cnt = size / sizeof(struct bpf_insn);
333 	memcpy(prog->insns, data,
334 	       prog->insns_cnt * sizeof(struct bpf_insn));
335 	prog->idx = idx;
336 	prog->instances.fds = NULL;
337 	prog->instances.nr = -1;
338 	prog->type = BPF_PROG_TYPE_UNSPEC;
339 	prog->btf_fd = -1;
340 
341 	return 0;
342 errout:
343 	bpf_program__exit(prog);
344 	return -ENOMEM;
345 }
346 
/*
 * Build a bpf_program from section data and append it to
 * obj->programs (array grown with reallocarray). On failure the
 * temporary program is torn down and obj is left untouched.
 * Returns 0, or a negative error from bpf_program__init()/-ENOMEM.
 */
static int
bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
			char *section_name, int idx)
{
	struct bpf_program prog, *progs;
	int nr_progs, err;

	err = bpf_program__init(data, size, section_name, idx, &prog);
	if (err)
		return err;

	prog.caps = &obj->caps;
	progs = obj->programs;
	nr_progs = obj->nr_programs;

	progs = reallocarray(progs, nr_progs + 1, sizeof(progs[0]));
	if (!progs) {
		/*
		 * In this case the original obj->programs
		 * is still valid, so don't need special treat for
		 * bpf_close_object().
		 */
		pr_warning("failed to alloc a new program under section '%s'\n",
			   section_name);
		bpf_program__exit(&prog);
		return -ENOMEM;
	}

	pr_debug("found program %s\n", prog.section_name);
	obj->programs = progs;
	obj->nr_programs = nr_progs + 1;
	prog.obj = obj;
	/* Shallow struct copy: heap members are now owned by the slot. */
	progs[nr_progs] = prog;
	return 0;
}
382 
/*
 * Resolve each program's name: the first STB_GLOBAL symbol found in
 * the program's ELF section, or ".text" for the text section when no
 * such symbol exists.
 * Returns 0, -LIBBPF_ERRNO__LIBELF on a bad string table reference,
 * -EINVAL when no name can be found, or -ENOMEM.
 */
static int
bpf_object__init_prog_names(struct bpf_object *obj)
{
	Elf_Data *symbols = obj->efile.symbols;
	struct bpf_program *prog;
	size_t pi, si;

	for (pi = 0; pi < obj->nr_programs; pi++) {
		const char *name = NULL;

		prog = &obj->programs[pi];

		/* Scan the symbol table for a global symbol defined in
		 * this program's section; stop at the first match.
		 */
		for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name;
		     si++) {
			GElf_Sym sym;

			if (!gelf_getsym(symbols, si, &sym))
				continue;
			if (sym.st_shndx != prog->idx)
				continue;
			if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL)
				continue;

			name = elf_strptr(obj->efile.elf,
					  obj->efile.strtabidx,
					  sym.st_name);
			if (!name) {
				pr_warning("failed to get sym name string for prog %s\n",
					   prog->section_name);
				return -LIBBPF_ERRNO__LIBELF;
			}
		}

		/* Sub-programs in .text may have no global symbol. */
		if (!name && prog->idx == obj->efile.text_shndx)
			name = ".text";

		if (!name) {
			pr_warning("failed to find sym for prog %s\n",
				   prog->section_name);
			return -EINVAL;
		}

		prog->name = strdup(name);
		if (!prog->name) {
			pr_warning("failed to allocate memory for prog sym %s\n",
				   name);
			return -ENOMEM;
		}
	}

	return 0;
}
435 
/*
 * Allocate and minimally initialize a bpf_object for @path,
 * optionally referencing (not copying) a caller-provided in-memory
 * ELF buffer. The new object is linked into the global
 * bpf_objects_list. Returns the object or ERR_PTR(-ENOMEM).
 */
static struct bpf_object *bpf_object__new(const char *path,
					  void *obj_buf,
					  size_t obj_buf_sz)
{
	struct bpf_object *obj;

	/* path is stored in the flexible array member at the tail. */
	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
	if (!obj) {
		pr_warning("alloc memory failed for %s\n", path);
		return ERR_PTR(-ENOMEM);
	}

	strcpy(obj->path, path);
	obj->efile.fd = -1;

	/*
	 * The caller of this function should also call
	 * bpf_object__elf_finish() after data collection to return
	 * obj_buf to the user. If not, we would have to duplicate the
	 * buffer to avoid the user freeing it before elf finish.
	 */
	obj->efile.obj_buf = obj_buf;
	obj->efile.obj_buf_sz = obj_buf_sz;
	obj->efile.maps_shndx = -1;

	obj->loaded = false;

	INIT_LIST_HEAD(&obj->list);
	list_add(&obj->list, &bpf_objects_list);
	return obj;
}
467 
/*
 * Release all ELF parsing state of @obj: libelf handle, saved reloc
 * sections, the file descriptor, and the reference to any
 * caller-provided object buffer. No-op when ELF state was never set
 * up (see obj_elf_valid()).
 */
static void bpf_object__elf_finish(struct bpf_object *obj)
{
	if (!obj_elf_valid(obj))
		return;

	if (obj->efile.elf) {
		elf_end(obj->efile.elf);
		obj->efile.elf = NULL;
	}
	/* symbols pointed into libelf-owned data freed by elf_end(). */
	obj->efile.symbols = NULL;

	zfree(&obj->efile.reloc);
	obj->efile.nr_reloc = 0;
	zclose(obj->efile.fd);
	/* The buffer belongs to the caller; just drop our reference. */
	obj->efile.obj_buf = NULL;
	obj->efile.obj_buf_sz = 0;
}
485 
486 static int bpf_object__elf_init(struct bpf_object *obj)
487 {
488 	int err = 0;
489 	GElf_Ehdr *ep;
490 
491 	if (obj_elf_valid(obj)) {
492 		pr_warning("elf init: internal error\n");
493 		return -LIBBPF_ERRNO__LIBELF;
494 	}
495 
496 	if (obj->efile.obj_buf_sz > 0) {
497 		/*
498 		 * obj_buf should have been validated by
499 		 * bpf_object__open_buffer().
500 		 */
501 		obj->efile.elf = elf_memory(obj->efile.obj_buf,
502 					    obj->efile.obj_buf_sz);
503 	} else {
504 		obj->efile.fd = open(obj->path, O_RDONLY);
505 		if (obj->efile.fd < 0) {
506 			char errmsg[STRERR_BUFSIZE];
507 			char *cp = libbpf_strerror_r(errno, errmsg,
508 						     sizeof(errmsg));
509 
510 			pr_warning("failed to open %s: %s\n", obj->path, cp);
511 			return -errno;
512 		}
513 
514 		obj->efile.elf = elf_begin(obj->efile.fd,
515 				LIBBPF_ELF_C_READ_MMAP,
516 				NULL);
517 	}
518 
519 	if (!obj->efile.elf) {
520 		pr_warning("failed to open %s as ELF file\n",
521 				obj->path);
522 		err = -LIBBPF_ERRNO__LIBELF;
523 		goto errout;
524 	}
525 
526 	if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
527 		pr_warning("failed to get EHDR from %s\n",
528 				obj->path);
529 		err = -LIBBPF_ERRNO__FORMAT;
530 		goto errout;
531 	}
532 	ep = &obj->efile.ehdr;
533 
534 	/* Old LLVM set e_machine to EM_NONE */
535 	if ((ep->e_type != ET_REL) || (ep->e_machine && (ep->e_machine != EM_BPF))) {
536 		pr_warning("%s is not an eBPF object file\n",
537 			obj->path);
538 		err = -LIBBPF_ERRNO__FORMAT;
539 		goto errout;
540 	}
541 
542 	return 0;
543 errout:
544 	bpf_object__elf_finish(obj);
545 	return err;
546 }
547 
548 static int
549 bpf_object__check_endianness(struct bpf_object *obj)
550 {
551 	static unsigned int const endian = 1;
552 
553 	switch (obj->efile.ehdr.e_ident[EI_DATA]) {
554 	case ELFDATA2LSB:
555 		/* We are big endian, BPF obj is little endian. */
556 		if (*(unsigned char const *)&endian != 1)
557 			goto mismatch;
558 		break;
559 
560 	case ELFDATA2MSB:
561 		/* We are little endian, BPF obj is big endian. */
562 		if (*(unsigned char const *)&endian != 0)
563 			goto mismatch;
564 		break;
565 	default:
566 		return -LIBBPF_ERRNO__ENDIAN;
567 	}
568 
569 	return 0;
570 
571 mismatch:
572 	pr_warning("Error: endianness mismatch.\n");
573 	return -LIBBPF_ERRNO__ENDIAN;
574 }
575 
/*
 * Copy the "license" section contents into obj->license. At most
 * sizeof(obj->license) - 1 bytes are copied; because the object is
 * calloc()ed in bpf_object__new(), the buffer stays NUL-terminated.
 * Always returns 0.
 */
static int
bpf_object__init_license(struct bpf_object *obj,
			 void *data, size_t size)
{
	memcpy(obj->license, data,
	       min(size, sizeof(obj->license) - 1));
	pr_debug("license of %s is %s\n", obj->path, obj->license);
	return 0;
}
585 
586 static int
587 bpf_object__init_kversion(struct bpf_object *obj,
588 			  void *data, size_t size)
589 {
590 	__u32 kver;
591 
592 	if (size != sizeof(kver)) {
593 		pr_warning("invalid kver section in %s\n", obj->path);
594 		return -LIBBPF_ERRNO__FORMAT;
595 	}
596 	memcpy(&kver, data, sizeof(kver));
597 	obj->kern_version = kver;
598 	pr_debug("kernel version of %s is %x\n", obj->path,
599 		 obj->kern_version);
600 	return 0;
601 }
602 
603 static int compare_bpf_map(const void *_a, const void *_b)
604 {
605 	const struct bpf_map *a = _a;
606 	const struct bpf_map *b = _b;
607 
608 	return a->offset - b->offset;
609 }
610 
611 static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
612 {
613 	if (type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
614 	    type == BPF_MAP_TYPE_HASH_OF_MAPS)
615 		return true;
616 	return false;
617 }
618 
619 static int
620 bpf_object__init_maps(struct bpf_object *obj, int flags)
621 {
622 	bool strict = !(flags & MAPS_RELAX_COMPAT);
623 	int i, map_idx, map_def_sz, nr_maps = 0;
624 	Elf_Scn *scn;
625 	Elf_Data *data;
626 	Elf_Data *symbols = obj->efile.symbols;
627 
628 	if (obj->efile.maps_shndx < 0)
629 		return -EINVAL;
630 	if (!symbols)
631 		return -EINVAL;
632 
633 	scn = elf_getscn(obj->efile.elf, obj->efile.maps_shndx);
634 	if (scn)
635 		data = elf_getdata(scn, NULL);
636 	if (!scn || !data) {
637 		pr_warning("failed to get Elf_Data from map section %d\n",
638 			   obj->efile.maps_shndx);
639 		return -EINVAL;
640 	}
641 
642 	/*
643 	 * Count number of maps. Each map has a name.
644 	 * Array of maps is not supported: only the first element is
645 	 * considered.
646 	 *
647 	 * TODO: Detect array of map and report error.
648 	 */
649 	for (i = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
650 		GElf_Sym sym;
651 
652 		if (!gelf_getsym(symbols, i, &sym))
653 			continue;
654 		if (sym.st_shndx != obj->efile.maps_shndx)
655 			continue;
656 		nr_maps++;
657 	}
658 
659 	/* Alloc obj->maps and fill nr_maps. */
660 	pr_debug("maps in %s: %d maps in %zd bytes\n", obj->path,
661 		 nr_maps, data->d_size);
662 
663 	if (!nr_maps)
664 		return 0;
665 
666 	/* Assume equally sized map definitions */
667 	map_def_sz = data->d_size / nr_maps;
668 	if (!data->d_size || (data->d_size % nr_maps) != 0) {
669 		pr_warning("unable to determine map definition size "
670 			   "section %s, %d maps in %zd bytes\n",
671 			   obj->path, nr_maps, data->d_size);
672 		return -EINVAL;
673 	}
674 
675 	obj->maps = calloc(nr_maps, sizeof(obj->maps[0]));
676 	if (!obj->maps) {
677 		pr_warning("alloc maps for object failed\n");
678 		return -ENOMEM;
679 	}
680 	obj->nr_maps = nr_maps;
681 
682 	for (i = 0; i < nr_maps; i++) {
683 		/*
684 		 * fill all fd with -1 so won't close incorrect
685 		 * fd (fd=0 is stdin) when failure (zclose won't close
686 		 * negative fd)).
687 		 */
688 		obj->maps[i].fd = -1;
689 		obj->maps[i].inner_map_fd = -1;
690 	}
691 
692 	/*
693 	 * Fill obj->maps using data in "maps" section.
694 	 */
695 	for (i = 0, map_idx = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
696 		GElf_Sym sym;
697 		const char *map_name;
698 		struct bpf_map_def *def;
699 
700 		if (!gelf_getsym(symbols, i, &sym))
701 			continue;
702 		if (sym.st_shndx != obj->efile.maps_shndx)
703 			continue;
704 
705 		map_name = elf_strptr(obj->efile.elf,
706 				      obj->efile.strtabidx,
707 				      sym.st_name);
708 		obj->maps[map_idx].offset = sym.st_value;
709 		if (sym.st_value + map_def_sz > data->d_size) {
710 			pr_warning("corrupted maps section in %s: last map \"%s\" too small\n",
711 				   obj->path, map_name);
712 			return -EINVAL;
713 		}
714 
715 		obj->maps[map_idx].name = strdup(map_name);
716 		if (!obj->maps[map_idx].name) {
717 			pr_warning("failed to alloc map name\n");
718 			return -ENOMEM;
719 		}
720 		pr_debug("map %d is \"%s\"\n", map_idx,
721 			 obj->maps[map_idx].name);
722 		def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
723 		/*
724 		 * If the definition of the map in the object file fits in
725 		 * bpf_map_def, copy it.  Any extra fields in our version
726 		 * of bpf_map_def will default to zero as a result of the
727 		 * calloc above.
728 		 */
729 		if (map_def_sz <= sizeof(struct bpf_map_def)) {
730 			memcpy(&obj->maps[map_idx].def, def, map_def_sz);
731 		} else {
732 			/*
733 			 * Here the map structure being read is bigger than what
734 			 * we expect, truncate if the excess bits are all zero.
735 			 * If they are not zero, reject this map as
736 			 * incompatible.
737 			 */
738 			char *b;
739 			for (b = ((char *)def) + sizeof(struct bpf_map_def);
740 			     b < ((char *)def) + map_def_sz; b++) {
741 				if (*b != 0) {
742 					pr_warning("maps section in %s: \"%s\" "
743 						   "has unrecognized, non-zero "
744 						   "options\n",
745 						   obj->path, map_name);
746 					if (strict)
747 						return -EINVAL;
748 				}
749 			}
750 			memcpy(&obj->maps[map_idx].def, def,
751 			       sizeof(struct bpf_map_def));
752 		}
753 		map_idx++;
754 	}
755 
756 	qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]), compare_bpf_map);
757 	return 0;
758 }
759 
760 static bool section_have_execinstr(struct bpf_object *obj, int idx)
761 {
762 	Elf_Scn *scn;
763 	GElf_Shdr sh;
764 
765 	scn = elf_getscn(obj->efile.elf, idx);
766 	if (!scn)
767 		return false;
768 
769 	if (gelf_getshdr(scn, &sh) != &sh)
770 		return false;
771 
772 	if (sh.sh_flags & SHF_EXECINSTR)
773 		return true;
774 
775 	return false;
776 }
777 
778 static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
779 {
780 	Elf *elf = obj->efile.elf;
781 	GElf_Ehdr *ep = &obj->efile.ehdr;
782 	Elf_Scn *scn = NULL;
783 	int idx = 0, err = 0;
784 
785 	/* Elf is corrupted/truncated, avoid calling elf_strptr. */
786 	if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
787 		pr_warning("failed to get e_shstrndx from %s\n",
788 			   obj->path);
789 		return -LIBBPF_ERRNO__FORMAT;
790 	}
791 
792 	while ((scn = elf_nextscn(elf, scn)) != NULL) {
793 		char *name;
794 		GElf_Shdr sh;
795 		Elf_Data *data;
796 
797 		idx++;
798 		if (gelf_getshdr(scn, &sh) != &sh) {
799 			pr_warning("failed to get section(%d) header from %s\n",
800 				   idx, obj->path);
801 			err = -LIBBPF_ERRNO__FORMAT;
802 			goto out;
803 		}
804 
805 		name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
806 		if (!name) {
807 			pr_warning("failed to get section(%d) name from %s\n",
808 				   idx, obj->path);
809 			err = -LIBBPF_ERRNO__FORMAT;
810 			goto out;
811 		}
812 
813 		data = elf_getdata(scn, 0);
814 		if (!data) {
815 			pr_warning("failed to get section(%d) data from %s(%s)\n",
816 				   idx, name, obj->path);
817 			err = -LIBBPF_ERRNO__FORMAT;
818 			goto out;
819 		}
820 		pr_debug("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
821 			 idx, name, (unsigned long)data->d_size,
822 			 (int)sh.sh_link, (unsigned long)sh.sh_flags,
823 			 (int)sh.sh_type);
824 
825 		if (strcmp(name, "license") == 0)
826 			err = bpf_object__init_license(obj,
827 						       data->d_buf,
828 						       data->d_size);
829 		else if (strcmp(name, "version") == 0)
830 			err = bpf_object__init_kversion(obj,
831 							data->d_buf,
832 							data->d_size);
833 		else if (strcmp(name, "maps") == 0)
834 			obj->efile.maps_shndx = idx;
835 		else if (strcmp(name, BTF_ELF_SEC) == 0) {
836 			obj->btf = btf__new(data->d_buf, data->d_size,
837 					    __pr_debug);
838 			if (IS_ERR(obj->btf)) {
839 				pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
840 					   BTF_ELF_SEC, PTR_ERR(obj->btf));
841 				obj->btf = NULL;
842 			}
843 		} else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
844 			obj->btf_ext = btf_ext__new(data->d_buf, data->d_size,
845 						    __pr_debug);
846 			if (IS_ERR(obj->btf_ext)) {
847 				pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
848 					   BTF_EXT_ELF_SEC,
849 					   PTR_ERR(obj->btf_ext));
850 				obj->btf_ext = NULL;
851 			}
852 		} else if (sh.sh_type == SHT_SYMTAB) {
853 			if (obj->efile.symbols) {
854 				pr_warning("bpf: multiple SYMTAB in %s\n",
855 					   obj->path);
856 				err = -LIBBPF_ERRNO__FORMAT;
857 			} else {
858 				obj->efile.symbols = data;
859 				obj->efile.strtabidx = sh.sh_link;
860 			}
861 		} else if ((sh.sh_type == SHT_PROGBITS) &&
862 			   (sh.sh_flags & SHF_EXECINSTR) &&
863 			   (data->d_size > 0)) {
864 			if (strcmp(name, ".text") == 0)
865 				obj->efile.text_shndx = idx;
866 			err = bpf_object__add_program(obj, data->d_buf,
867 						      data->d_size, name, idx);
868 			if (err) {
869 				char errmsg[STRERR_BUFSIZE];
870 				char *cp = libbpf_strerror_r(-err, errmsg,
871 							     sizeof(errmsg));
872 
873 				pr_warning("failed to alloc program %s (%s): %s",
874 					   name, obj->path, cp);
875 			}
876 		} else if (sh.sh_type == SHT_REL) {
877 			void *reloc = obj->efile.reloc;
878 			int nr_reloc = obj->efile.nr_reloc + 1;
879 			int sec = sh.sh_info; /* points to other section */
880 
881 			/* Only do relo for section with exec instructions */
882 			if (!section_have_execinstr(obj, sec)) {
883 				pr_debug("skip relo %s(%d) for section(%d)\n",
884 					 name, idx, sec);
885 				continue;
886 			}
887 
888 			reloc = reallocarray(reloc, nr_reloc,
889 					     sizeof(*obj->efile.reloc));
890 			if (!reloc) {
891 				pr_warning("realloc failed\n");
892 				err = -ENOMEM;
893 			} else {
894 				int n = nr_reloc - 1;
895 
896 				obj->efile.reloc = reloc;
897 				obj->efile.nr_reloc = nr_reloc;
898 
899 				obj->efile.reloc[n].shdr = sh;
900 				obj->efile.reloc[n].data = data;
901 			}
902 		} else {
903 			pr_debug("skip section(%d) %s\n", idx, name);
904 		}
905 		if (err)
906 			goto out;
907 	}
908 
909 	if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) {
910 		pr_warning("Corrupted ELF file: index of strtab invalid\n");
911 		return LIBBPF_ERRNO__FORMAT;
912 	}
913 	if (obj->efile.maps_shndx >= 0) {
914 		err = bpf_object__init_maps(obj, flags);
915 		if (err)
916 			goto out;
917 	}
918 	err = bpf_object__init_prog_names(obj);
919 out:
920 	return err;
921 }
922 
923 static struct bpf_program *
924 bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
925 {
926 	struct bpf_program *prog;
927 	size_t i;
928 
929 	for (i = 0; i < obj->nr_programs; i++) {
930 		prog = &obj->programs[i];
931 		if (prog->idx == idx)
932 			return prog;
933 	}
934 	return NULL;
935 }
936 
937 struct bpf_program *
938 bpf_object__find_program_by_title(struct bpf_object *obj, const char *title)
939 {
940 	struct bpf_program *pos;
941 
942 	bpf_object__for_each_program(pos, obj) {
943 		if (pos->section_name && !strcmp(pos->section_name, title))
944 			return pos;
945 	}
946 	return NULL;
947 }
948 
/*
 * Parse one SHT_REL section belonging to @prog and fill
 * prog->reloc_desc with one entry per relocation:
 *   - RELO_CALL for bpf-to-bpf pseudo calls (symbol in .text);
 *   - RELO_LD64 for BPF_LD|BPF_IMM|BPF_DW map loads (symbol in the
 *     "maps" section, matched to obj->maps by offset).
 * Returns 0 or a negative libbpf error code.
 */
static int
bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
			   Elf_Data *data, struct bpf_object *obj)
{
	Elf_Data *symbols = obj->efile.symbols;
	int text_shndx = obj->efile.text_shndx;
	int maps_shndx = obj->efile.maps_shndx;
	struct bpf_map *maps = obj->maps;
	size_t nr_maps = obj->nr_maps;
	int i, nrels;

	pr_debug("collecting relocating info for: '%s'\n",
		 prog->section_name);
	nrels = shdr->sh_size / shdr->sh_entsize;

	prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
	if (!prog->reloc_desc) {
		pr_warning("failed to alloc memory in relocation\n");
		return -ENOMEM;
	}
	prog->nr_reloc = nrels;

	for (i = 0; i < nrels; i++) {
		GElf_Sym sym;
		GElf_Rel rel;
		unsigned int insn_idx;
		struct bpf_insn *insns = prog->insns;
		size_t map_idx;

		if (!gelf_getrel(data, i, &rel)) {
			pr_warning("relocation: failed to get %d reloc\n", i);
			return -LIBBPF_ERRNO__FORMAT;
		}

		/* Resolve the symbol this relocation refers to. */
		if (!gelf_getsym(symbols,
				 GELF_R_SYM(rel.r_info),
				 &sym)) {
			pr_warning("relocation: symbol %"PRIx64" not found\n",
				   GELF_R_SYM(rel.r_info));
			return -LIBBPF_ERRNO__FORMAT;
		}
		pr_debug("relo for %lld value %lld name %d\n",
			 (long long) (rel.r_info >> 32),
			 (long long) sym.st_value, sym.st_name);

		/* Only symbols in the maps section or .text are valid. */
		if (sym.st_shndx != maps_shndx && sym.st_shndx != text_shndx) {
			pr_warning("Program '%s' contains non-map related relo data pointing to section %u\n",
				   prog->section_name, sym.st_shndx);
			return -LIBBPF_ERRNO__RELOC;
		}

		/* NOTE(review): insn_idx is not validated against
		 * prog->insns_cnt before the insns[insn_idx] accesses
		 * below; a corrupted r_offset could read out of bounds.
		 * Consider adding a range check.
		 */
		insn_idx = rel.r_offset / sizeof(struct bpf_insn);
		pr_debug("relocation: insn_idx=%u\n", insn_idx);

		if (insns[insn_idx].code == (BPF_JMP | BPF_CALL)) {
			if (insns[insn_idx].src_reg != BPF_PSEUDO_CALL) {
				pr_warning("incorrect bpf_call opcode\n");
				return -LIBBPF_ERRNO__RELOC;
			}
			prog->reloc_desc[i].type = RELO_CALL;
			prog->reloc_desc[i].insn_idx = insn_idx;
			prog->reloc_desc[i].text_off = sym.st_value;
			obj->has_pseudo_calls = true;
			continue;
		}

		if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
			pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
				   insn_idx, insns[insn_idx].code);
			return -LIBBPF_ERRNO__RELOC;
		}

		/* TODO: 'maps' is sorted. We can use bsearch to make it faster. */
		for (map_idx = 0; map_idx < nr_maps; map_idx++) {
			if (maps[map_idx].offset == sym.st_value) {
				pr_debug("relocation: find map %zd (%s) for insn %u\n",
					 map_idx, maps[map_idx].name, insn_idx);
				break;
			}
		}

		if (map_idx >= nr_maps) {
			pr_warning("bpf relocation: map_idx %d large than %d\n",
				   (int)map_idx, (int)nr_maps - 1);
			return -LIBBPF_ERRNO__RELOC;
		}

		prog->reloc_desc[i].type = RELO_LD64;
		prog->reloc_desc[i].insn_idx = insn_idx;
		prog->reloc_desc[i].map_idx = map_idx;
	}
	return 0;
}
1042 
1043 static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf)
1044 {
1045 	const struct btf_type *container_type;
1046 	const struct btf_member *key, *value;
1047 	struct bpf_map_def *def = &map->def;
1048 	const size_t max_name = 256;
1049 	char container_name[max_name];
1050 	__s64 key_size, value_size;
1051 	__s32 container_id;
1052 
1053 	if (snprintf(container_name, max_name, "____btf_map_%s", map->name) ==
1054 	    max_name) {
1055 		pr_warning("map:%s length of '____btf_map_%s' is too long\n",
1056 			   map->name, map->name);
1057 		return -EINVAL;
1058 	}
1059 
1060 	container_id = btf__find_by_name(btf, container_name);
1061 	if (container_id < 0) {
1062 		pr_debug("map:%s container_name:%s cannot be found in BTF. Missing BPF_ANNOTATE_KV_PAIR?\n",
1063 			 map->name, container_name);
1064 		return container_id;
1065 	}
1066 
1067 	container_type = btf__type_by_id(btf, container_id);
1068 	if (!container_type) {
1069 		pr_warning("map:%s cannot find BTF type for container_id:%u\n",
1070 			   map->name, container_id);
1071 		return -EINVAL;
1072 	}
1073 
1074 	if (BTF_INFO_KIND(container_type->info) != BTF_KIND_STRUCT ||
1075 	    BTF_INFO_VLEN(container_type->info) < 2) {
1076 		pr_warning("map:%s container_name:%s is an invalid container struct\n",
1077 			   map->name, container_name);
1078 		return -EINVAL;
1079 	}
1080 
1081 	key = (struct btf_member *)(container_type + 1);
1082 	value = key + 1;
1083 
1084 	key_size = btf__resolve_size(btf, key->type);
1085 	if (key_size < 0) {
1086 		pr_warning("map:%s invalid BTF key_type_size\n",
1087 			   map->name);
1088 		return key_size;
1089 	}
1090 
1091 	if (def->key_size != key_size) {
1092 		pr_warning("map:%s btf_key_type_size:%u != map_def_key_size:%u\n",
1093 			   map->name, (__u32)key_size, def->key_size);
1094 		return -EINVAL;
1095 	}
1096 
1097 	value_size = btf__resolve_size(btf, value->type);
1098 	if (value_size < 0) {
1099 		pr_warning("map:%s invalid BTF value_type_size\n", map->name);
1100 		return value_size;
1101 	}
1102 
1103 	if (def->value_size != value_size) {
1104 		pr_warning("map:%s btf_value_type_size:%u != map_def_value_size:%u\n",
1105 			   map->name, (__u32)value_size, def->value_size);
1106 		return -EINVAL;
1107 	}
1108 
1109 	map->btf_key_type_id = key->type;
1110 	map->btf_value_type_id = value->type;
1111 
1112 	return 0;
1113 }
1114 
/*
 * Replace @map's fd with a duplicate of the existing kernel map fd
 * @fd, adopting that map's name, type, sizes, flags and BTF ids.
 * On failure the map is left unchanged and a negative errno-style
 * error is returned.
 */
int bpf_map__reuse_fd(struct bpf_map *map, int fd)
{
	struct bpf_map_info info = {};
	__u32 len = sizeof(info);
	int new_fd, err;
	char *new_name;

	err = bpf_obj_get_info_by_fd(fd, &info, &len);
	if (err)
		return err;

	new_name = strdup(info.name);
	if (!new_name)
		return -errno;

	/* Reserve a fresh fd number, then turn it into an O_CLOEXEC
	 * duplicate of @fd with dup3().
	 */
	new_fd = open("/", O_RDONLY | O_CLOEXEC);
	if (new_fd < 0)
		goto err_free_new_name;

	new_fd = dup3(fd, new_fd, O_CLOEXEC);
	if (new_fd < 0)
		goto err_close_new_fd;

	/* Drop our old map fd before taking over the new identity. */
	err = zclose(map->fd);
	if (err)
		goto err_close_new_fd;
	free(map->name);

	map->fd = new_fd;
	map->name = new_name;
	map->def.type = info.type;
	map->def.key_size = info.key_size;
	map->def.value_size = info.value_size;
	map->def.max_entries = info.max_entries;
	map->def.map_flags = info.map_flags;
	map->btf_key_type_id = info.btf_key_type_id;
	map->btf_value_type_id = info.btf_value_type_id;

	return 0;

err_close_new_fd:
	close(new_fd);
err_free_new_name:
	free(new_name);
	return -errno;
}
1161 
1162 static int
1163 bpf_object__probe_name(struct bpf_object *obj)
1164 {
1165 	struct bpf_load_program_attr attr;
1166 	char *cp, errmsg[STRERR_BUFSIZE];
1167 	struct bpf_insn insns[] = {
1168 		BPF_MOV64_IMM(BPF_REG_0, 0),
1169 		BPF_EXIT_INSN(),
1170 	};
1171 	int ret;
1172 
1173 	/* make sure basic loading works */
1174 
1175 	memset(&attr, 0, sizeof(attr));
1176 	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
1177 	attr.insns = insns;
1178 	attr.insns_cnt = ARRAY_SIZE(insns);
1179 	attr.license = "GPL";
1180 
1181 	ret = bpf_load_program_xattr(&attr, NULL, 0);
1182 	if (ret < 0) {
1183 		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1184 		pr_warning("Error in %s():%s(%d). Couldn't load basic 'r0 = 0' BPF program.\n",
1185 			   __func__, cp, errno);
1186 		return -errno;
1187 	}
1188 	close(ret);
1189 
1190 	/* now try the same program, but with the name */
1191 
1192 	attr.name = "test";
1193 	ret = bpf_load_program_xattr(&attr, NULL, 0);
1194 	if (ret >= 0) {
1195 		obj->caps.name = 1;
1196 		close(ret);
1197 	}
1198 
1199 	return 0;
1200 }
1201 
/* Probe kernel capabilities relevant to this object. Currently the only
 * probed capability is program-name support.
 */
static int
bpf_object__probe_caps(struct bpf_object *obj)
{
	int err = bpf_object__probe_name(obj);

	return err;
}
1207 
1208 static int
1209 bpf_object__create_maps(struct bpf_object *obj)
1210 {
1211 	struct bpf_create_map_attr create_attr = {};
1212 	unsigned int i;
1213 	int err;
1214 
1215 	for (i = 0; i < obj->nr_maps; i++) {
1216 		struct bpf_map *map = &obj->maps[i];
1217 		struct bpf_map_def *def = &map->def;
1218 		char *cp, errmsg[STRERR_BUFSIZE];
1219 		int *pfd = &map->fd;
1220 
1221 		if (map->fd >= 0) {
1222 			pr_debug("skip map create (preset) %s: fd=%d\n",
1223 				 map->name, map->fd);
1224 			continue;
1225 		}
1226 
1227 		if (obj->caps.name)
1228 			create_attr.name = map->name;
1229 		create_attr.map_ifindex = map->map_ifindex;
1230 		create_attr.map_type = def->type;
1231 		create_attr.map_flags = def->map_flags;
1232 		create_attr.key_size = def->key_size;
1233 		create_attr.value_size = def->value_size;
1234 		create_attr.max_entries = def->max_entries;
1235 		create_attr.btf_fd = 0;
1236 		create_attr.btf_key_type_id = 0;
1237 		create_attr.btf_value_type_id = 0;
1238 		if (bpf_map_type__is_map_in_map(def->type) &&
1239 		    map->inner_map_fd >= 0)
1240 			create_attr.inner_map_fd = map->inner_map_fd;
1241 
1242 		if (obj->btf && !bpf_map_find_btf_info(map, obj->btf)) {
1243 			create_attr.btf_fd = btf__fd(obj->btf);
1244 			create_attr.btf_key_type_id = map->btf_key_type_id;
1245 			create_attr.btf_value_type_id = map->btf_value_type_id;
1246 		}
1247 
1248 		*pfd = bpf_create_map_xattr(&create_attr);
1249 		if (*pfd < 0 && create_attr.btf_key_type_id) {
1250 			cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1251 			pr_warning("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
1252 				   map->name, cp, errno);
1253 			create_attr.btf_fd = 0;
1254 			create_attr.btf_key_type_id = 0;
1255 			create_attr.btf_value_type_id = 0;
1256 			map->btf_key_type_id = 0;
1257 			map->btf_value_type_id = 0;
1258 			*pfd = bpf_create_map_xattr(&create_attr);
1259 		}
1260 
1261 		if (*pfd < 0) {
1262 			size_t j;
1263 
1264 			err = *pfd;
1265 			cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1266 			pr_warning("failed to create map (name: '%s'): %s\n",
1267 				   map->name, cp);
1268 			for (j = 0; j < i; j++)
1269 				zclose(obj->maps[j].fd);
1270 			return err;
1271 		}
1272 		pr_debug("create map %s: fd=%d\n", map->name, *pfd);
1273 	}
1274 
1275 	return 0;
1276 }
1277 
1278 static int
1279 bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
1280 			struct reloc_desc *relo)
1281 {
1282 	struct bpf_insn *insn, *new_insn;
1283 	struct bpf_program *text;
1284 	size_t new_cnt;
1285 	int err;
1286 
1287 	if (relo->type != RELO_CALL)
1288 		return -LIBBPF_ERRNO__RELOC;
1289 
1290 	if (prog->idx == obj->efile.text_shndx) {
1291 		pr_warning("relo in .text insn %d into off %d\n",
1292 			   relo->insn_idx, relo->text_off);
1293 		return -LIBBPF_ERRNO__RELOC;
1294 	}
1295 
1296 	if (prog->main_prog_cnt == 0) {
1297 		text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx);
1298 		if (!text) {
1299 			pr_warning("no .text section found yet relo into text exist\n");
1300 			return -LIBBPF_ERRNO__RELOC;
1301 		}
1302 		new_cnt = prog->insns_cnt + text->insns_cnt;
1303 		new_insn = reallocarray(prog->insns, new_cnt, sizeof(*insn));
1304 		if (!new_insn) {
1305 			pr_warning("oom in prog realloc\n");
1306 			return -ENOMEM;
1307 		}
1308 
1309 		if (obj->btf && obj->btf_ext) {
1310 			err = btf_ext__reloc(obj->btf, obj->btf_ext,
1311 					     text->section_name,
1312 					     prog->insns_cnt,
1313 					     &prog->func_info,
1314 					     &prog->func_info_len);
1315 			if (err) {
1316 				pr_warning("error in btf_ext__reloc for sec %s\n",
1317 					   text->section_name);
1318 				return err;
1319 			}
1320 		}
1321 
1322 		memcpy(new_insn + prog->insns_cnt, text->insns,
1323 		       text->insns_cnt * sizeof(*insn));
1324 		prog->insns = new_insn;
1325 		prog->main_prog_cnt = prog->insns_cnt;
1326 		prog->insns_cnt = new_cnt;
1327 		pr_debug("added %zd insn from %s to prog %s\n",
1328 			 text->insns_cnt, text->section_name,
1329 			 prog->section_name);
1330 	}
1331 	insn = &prog->insns[relo->insn_idx];
1332 	insn->imm += prog->main_prog_cnt - relo->insn_idx;
1333 	return 0;
1334 }
1335 
/* Resolve all recorded relocations of @prog: map references become map fds,
 * calls into .text are rewritten via bpf_program__reloc_text(). BTF.ext
 * func_info for the section is initialized first when present.
 */
static int
bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
{
	int i, err;

	if (!prog)
		return 0;

	/* pull in .BTF.ext func_info for this program section first */
	if (obj->btf && obj->btf_ext) {
		err = btf_ext__reloc_init(obj->btf, obj->btf_ext,
					  prog->section_name,
					  &prog->func_info,
					  &prog->func_info_rec_size,
					  &prog->func_info_len);
		if (err) {
			pr_warning("err in btf_ext__reloc_init for sec %s\n",
				   prog->section_name);
			return err;
		}
		prog->btf_fd = btf__fd(obj->btf);
	}

	if (!prog->reloc_desc)
		return 0;

	for (i = 0; i < prog->nr_reloc; i++) {
		if (prog->reloc_desc[i].type == RELO_LD64) {
			/* map reference: patch the insn with the map fd */
			struct bpf_insn *insns = prog->insns;
			int insn_idx, map_idx;

			insn_idx = prog->reloc_desc[i].insn_idx;
			map_idx = prog->reloc_desc[i].map_idx;

			if (insn_idx >= (int)prog->insns_cnt) {
				pr_warning("relocation out of range: '%s'\n",
					   prog->section_name);
				return -LIBBPF_ERRNO__RELOC;
			}
			insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
			insns[insn_idx].imm = obj->maps[map_idx].fd;
		} else {
			/* bpf-to-bpf call into the .text section */
			err = bpf_program__reloc_text(prog, obj,
						      &prog->reloc_desc[i]);
			if (err)
				return err;
		}
	}

	/* relocations are single-use; drop them once applied */
	zfree(&prog->reloc_desc);
	prog->nr_reloc = 0;
	return 0;
}
1388 
1389 
1390 static int
1391 bpf_object__relocate(struct bpf_object *obj)
1392 {
1393 	struct bpf_program *prog;
1394 	size_t i;
1395 	int err;
1396 
1397 	for (i = 0; i < obj->nr_programs; i++) {
1398 		prog = &obj->programs[i];
1399 
1400 		err = bpf_program__relocate(prog, obj);
1401 		if (err) {
1402 			pr_warning("failed to relocate '%s'\n",
1403 				   prog->section_name);
1404 			return err;
1405 		}
1406 	}
1407 	return 0;
1408 }
1409 
1410 static int bpf_object__collect_reloc(struct bpf_object *obj)
1411 {
1412 	int i, err;
1413 
1414 	if (!obj_elf_valid(obj)) {
1415 		pr_warning("Internal error: elf object is closed\n");
1416 		return -LIBBPF_ERRNO__INTERNAL;
1417 	}
1418 
1419 	for (i = 0; i < obj->efile.nr_reloc; i++) {
1420 		GElf_Shdr *shdr = &obj->efile.reloc[i].shdr;
1421 		Elf_Data *data = obj->efile.reloc[i].data;
1422 		int idx = shdr->sh_info;
1423 		struct bpf_program *prog;
1424 
1425 		if (shdr->sh_type != SHT_REL) {
1426 			pr_warning("internal error at %d\n", __LINE__);
1427 			return -LIBBPF_ERRNO__INTERNAL;
1428 		}
1429 
1430 		prog = bpf_object__find_prog_by_idx(obj, idx);
1431 		if (!prog) {
1432 			pr_warning("relocation failed: no section(%d)\n", idx);
1433 			return -LIBBPF_ERRNO__RELOC;
1434 		}
1435 
1436 		err = bpf_program__collect_reloc(prog,
1437 						 shdr, data,
1438 						 obj);
1439 		if (err)
1440 			return err;
1441 	}
1442 	return 0;
1443 }
1444 
/* Load one program instance into the kernel via BPF_PROG_LOAD. On success
 * stores the new fd in *pfd and returns 0; on failure returns a negative
 * LIBBPF_ERRNO__* code classifying the cause as precisely as possible.
 */
static int
load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
	     char *license, __u32 kern_version, int *pfd,
	     __u32 func_info_cnt)
{
	struct bpf_load_program_attr load_attr;
	char *cp, errmsg[STRERR_BUFSIZE];
	char *log_buf;
	int ret;

	memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
	load_attr.prog_type = prog->type;
	load_attr.expected_attach_type = prog->expected_attach_type;
	/* only pass a name if the kernel was probed to accept one */
	if (prog->caps->name)
		load_attr.name = prog->name;
	load_attr.insns = insns;
	load_attr.insns_cnt = insns_cnt;
	load_attr.license = license;
	load_attr.kern_version = kern_version;
	load_attr.prog_ifindex = prog->prog_ifindex;
	load_attr.prog_btf_fd = prog->btf_fd >= 0 ? prog->btf_fd : 0;
	load_attr.func_info = prog->func_info;
	load_attr.func_info_rec_size = prog->func_info_rec_size;
	load_attr.func_info_cnt = func_info_cnt;

	if (!load_attr.insns || !load_attr.insns_cnt)
		return -EINVAL;

	/* the verifier log is best-effort; load proceeds without it on OOM */
	log_buf = malloc(BPF_LOG_BUF_SIZE);
	if (!log_buf)
		pr_warning("Alloc log buffer for bpf loader error, continue without log\n");

	ret = bpf_load_program_xattr(&load_attr, log_buf, BPF_LOG_BUF_SIZE);

	if (ret >= 0) {
		*pfd = ret;
		ret = 0;
		goto out;
	}

	/* classify the failure; __LOAD is the generic fallback */
	ret = -LIBBPF_ERRNO__LOAD;
	cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
	pr_warning("load bpf program failed: %s\n", cp);

	if (log_buf && log_buf[0] != '\0') {
		/* the verifier produced output: a verification failure */
		ret = -LIBBPF_ERRNO__VERIFY;
		pr_warning("-- BEGIN DUMP LOG ---\n");
		pr_warning("\n%s\n", log_buf);
		pr_warning("-- END LOG --\n");
	} else if (load_attr.insns_cnt >= BPF_MAXINSNS) {
		pr_warning("Program too large (%zu insns), at most %d insns\n",
			   load_attr.insns_cnt, BPF_MAXINSNS);
		ret = -LIBBPF_ERRNO__PROG2BIG;
	} else {
		/* Wrong program type? */
		if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
			int fd;

			/* probe: would the same insns load as a kprobe? */
			load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
			load_attr.expected_attach_type = 0;
			fd = bpf_load_program_xattr(&load_attr, NULL, 0);
			if (fd >= 0) {
				close(fd);
				ret = -LIBBPF_ERRNO__PROGTYPE;
				goto out;
			}
		}

		/* empty verifier log hints at a kernel-version mismatch */
		if (log_buf)
			ret = -LIBBPF_ERRNO__KVER;
	}

out:
	free(log_buf);
	return ret;
}
1521 
/* Load all instances of @prog. Without a preprocessor a single implicit
 * instance is set up and loaded; with one, each instance's rewritten
 * insns are loaded separately. The program's insns are freed afterwards,
 * so a program can only be loaded once.
 */
int
bpf_program__load(struct bpf_program *prog,
		  char *license, __u32 kern_version)
{
	__u32 func_info_cnt;
	int err = 0, fd, i;

	/* func_info_len is a byte count; convert to a record count */
	if (prog->func_info_len == 0)
		func_info_cnt = 0;
	else
		func_info_cnt = prog->func_info_len / prog->func_info_rec_size;

	/* lazily set up the single implicit instance */
	if (prog->instances.nr < 0 || !prog->instances.fds) {
		if (prog->preprocessor) {
			pr_warning("Internal error: can't load program '%s'\n",
				   prog->section_name);
			return -LIBBPF_ERRNO__INTERNAL;
		}

		prog->instances.fds = malloc(sizeof(int));
		if (!prog->instances.fds) {
			pr_warning("Not enough memory for BPF fds\n");
			return -ENOMEM;
		}
		prog->instances.nr = 1;
		prog->instances.fds[0] = -1;
	}

	if (!prog->preprocessor) {
		/* warn (but proceed) if instance bookkeeping is off */
		if (prog->instances.nr != 1) {
			pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n",
				   prog->section_name, prog->instances.nr);
		}
		err = load_program(prog, prog->insns, prog->insns_cnt,
				   license, kern_version, &fd,
				   func_info_cnt);
		if (!err)
			prog->instances.fds[0] = fd;
		goto out;
	}

	/* preprocessor path: load each rewritten instance separately */
	for (i = 0; i < prog->instances.nr; i++) {
		struct bpf_prog_prep_result result;
		bpf_program_prep_t preprocessor = prog->preprocessor;

		bzero(&result, sizeof(result));
		err = preprocessor(prog, i, prog->insns,
				   prog->insns_cnt, &result);
		if (err) {
			pr_warning("Preprocessing the %dth instance of program '%s' failed\n",
				   i, prog->section_name);
			goto out;
		}

		/* the preprocessor may skip an instance by returning no insns */
		if (!result.new_insn_ptr || !result.new_insn_cnt) {
			pr_debug("Skip loading the %dth instance of program '%s'\n",
				 i, prog->section_name);
			prog->instances.fds[i] = -1;
			if (result.pfd)
				*result.pfd = -1;
			continue;
		}

		err = load_program(prog, result.new_insn_ptr,
				   result.new_insn_cnt,
				   license, kern_version, &fd,
				   func_info_cnt);

		if (err) {
			pr_warning("Loading the %dth instance of program '%s' failed\n",
					i, prog->section_name);
			goto out;
		}

		if (result.pfd)
			*result.pfd = fd;
		prog->instances.fds[i] = fd;
	}
out:
	if (err)
		pr_warning("failed to load program '%s'\n",
			   prog->section_name);
	/* insns are no longer needed once the kernel has the program */
	zfree(&prog->insns);
	prog->insns_cnt = 0;
	return err;
}
1608 
1609 static bool bpf_program__is_function_storage(struct bpf_program *prog,
1610 					     struct bpf_object *obj)
1611 {
1612 	return prog->idx == obj->efile.text_shndx && obj->has_pseudo_calls;
1613 }
1614 
1615 static int
1616 bpf_object__load_progs(struct bpf_object *obj)
1617 {
1618 	size_t i;
1619 	int err;
1620 
1621 	for (i = 0; i < obj->nr_programs; i++) {
1622 		if (bpf_program__is_function_storage(&obj->programs[i], obj))
1623 			continue;
1624 		err = bpf_program__load(&obj->programs[i],
1625 					obj->license,
1626 					obj->kern_version);
1627 		if (err)
1628 			return err;
1629 	}
1630 	return 0;
1631 }
1632 
/* Whether loading programs of @type requires a kernel version attribute.
 * Only kprobes — and unknown/future types, via default — do; the result
 * feeds bpf_object__validate().
 */
static bool bpf_prog_type__needs_kver(enum bpf_prog_type type)
{
	switch (type) {
	case BPF_PROG_TYPE_SOCKET_FILTER:
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
	case BPF_PROG_TYPE_XDP:
	case BPF_PROG_TYPE_CGROUP_SKB:
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
	case BPF_PROG_TYPE_SOCK_OPS:
	case BPF_PROG_TYPE_SK_SKB:
	case BPF_PROG_TYPE_CGROUP_DEVICE:
	case BPF_PROG_TYPE_SK_MSG:
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
	case BPF_PROG_TYPE_LIRC_MODE2:
	case BPF_PROG_TYPE_SK_REUSEPORT:
	case BPF_PROG_TYPE_FLOW_DISSECTOR:
	case BPF_PROG_TYPE_UNSPEC:
	case BPF_PROG_TYPE_TRACEPOINT:
	case BPF_PROG_TYPE_RAW_TRACEPOINT:
	case BPF_PROG_TYPE_PERF_EVENT:
		return false;
	case BPF_PROG_TYPE_KPROBE:
	default:
		return true;
	}
}
1664 
1665 static int bpf_object__validate(struct bpf_object *obj, bool needs_kver)
1666 {
1667 	if (needs_kver && obj->kern_version == 0) {
1668 		pr_warning("%s doesn't provide kernel version\n",
1669 			   obj->path);
1670 		return -LIBBPF_ERRNO__KVERSION;
1671 	}
1672 	return 0;
1673 }
1674 
/* Open an object from a file path or an in-memory buffer: parse the ELF,
 * collect sections and relocations, and validate. Returns the new object
 * or an ERR_PTR()-encoded error.
 */
static struct bpf_object *
__bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz,
		   bool needs_kver, int flags)
{
	struct bpf_object *obj;
	int err;

	if (elf_version(EV_CURRENT) == EV_NONE) {
		pr_warning("failed to init libelf for %s\n", path);
		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
	}

	obj = bpf_object__new(path, obj_buf, obj_buf_sz);
	if (IS_ERR(obj))
		return obj;

	/* CHECK_ERR() bails out to the `out` label with `err` set */
	CHECK_ERR(bpf_object__elf_init(obj), err, out);
	CHECK_ERR(bpf_object__check_endianness(obj), err, out);
	CHECK_ERR(bpf_object__elf_collect(obj, flags), err, out);
	CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
	CHECK_ERR(bpf_object__validate(obj, needs_kver), err, out);

	/* the ELF handle is only needed during parsing; release it now */
	bpf_object__elf_finish(obj);
	return obj;
out:
	bpf_object__close(obj);
	return ERR_PTR(err);
}
1703 
1704 struct bpf_object *__bpf_object__open_xattr(struct bpf_object_open_attr *attr,
1705 					    int flags)
1706 {
1707 	/* param validation */
1708 	if (!attr->file)
1709 		return NULL;
1710 
1711 	pr_debug("loading %s\n", attr->file);
1712 
1713 	return __bpf_object__open(attr->file, NULL, 0,
1714 				  bpf_prog_type__needs_kver(attr->prog_type),
1715 				  flags);
1716 }
1717 
/* Open @attr->file with default (zero) flags. */
struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
{
	return __bpf_object__open_xattr(attr, 0);
}
1722 
1723 struct bpf_object *bpf_object__open(const char *path)
1724 {
1725 	struct bpf_object_open_attr attr = {
1726 		.file		= path,
1727 		.prog_type	= BPF_PROG_TYPE_UNSPEC,
1728 	};
1729 
1730 	return bpf_object__open_xattr(&attr);
1731 }
1732 
/* Open an object directly from an in-memory ELF image. If @name is NULL a
 * placeholder name is derived from the buffer address and size.
 */
struct bpf_object *bpf_object__open_buffer(void *obj_buf,
					   size_t obj_buf_sz,
					   const char *name)
{
	char tmp_name[64];

	/* param validation */
	if (!obj_buf || obj_buf_sz <= 0)
		return NULL;

	if (!name) {
		snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
			 (unsigned long)obj_buf,
			 (unsigned long)obj_buf_sz);
		tmp_name[sizeof(tmp_name) - 1] = '\0';
		name = tmp_name;
	}
	pr_debug("loading object '%s' from buffer\n",
		 name);

	/* NOTE(review): the last argument feeds the int `flags` parameter
	 * of __bpf_object__open(), yet the boolean `true` (i.e. 1) is
	 * passed here while the file-based paths pass an explicit flags
	 * value — confirm this is intentional.
	 */
	return __bpf_object__open(name, obj_buf, obj_buf_sz, true, true);
}
1755 
1756 int bpf_object__unload(struct bpf_object *obj)
1757 {
1758 	size_t i;
1759 
1760 	if (!obj)
1761 		return -EINVAL;
1762 
1763 	for (i = 0; i < obj->nr_maps; i++)
1764 		zclose(obj->maps[i].fd);
1765 
1766 	for (i = 0; i < obj->nr_programs; i++)
1767 		bpf_program__unload(&obj->programs[i]);
1768 
1769 	return 0;
1770 }
1771 
/* Load @obj into the kernel: probe capabilities, create maps, relocate and
 * load programs. May only be called once per object, since loading frees
 * each program's instructions. Rolls everything back on failure.
 */
int bpf_object__load(struct bpf_object *obj)
{
	int err;

	if (!obj)
		return -EINVAL;

	if (obj->loaded) {
		pr_warning("object should not be loaded twice\n");
		return -EINVAL;
	}

	obj->loaded = true;

	/* CHECK_ERR() bails out to the `out` label with `err` set */
	CHECK_ERR(bpf_object__probe_caps(obj), err, out);
	CHECK_ERR(bpf_object__create_maps(obj), err, out);
	CHECK_ERR(bpf_object__relocate(obj), err, out);
	CHECK_ERR(bpf_object__load_progs(obj), err, out);

	return 0;
out:
	/* undo whatever maps/programs made it into the kernel */
	bpf_object__unload(obj);
	pr_warning("failed to load object '%s'\n", obj->path);
	return err;
}
1797 
1798 static int check_path(const char *path)
1799 {
1800 	char *cp, errmsg[STRERR_BUFSIZE];
1801 	struct statfs st_fs;
1802 	char *dname, *dir;
1803 	int err = 0;
1804 
1805 	if (path == NULL)
1806 		return -EINVAL;
1807 
1808 	dname = strdup(path);
1809 	if (dname == NULL)
1810 		return -ENOMEM;
1811 
1812 	dir = dirname(dname);
1813 	if (statfs(dir, &st_fs)) {
1814 		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1815 		pr_warning("failed to statfs %s: %s\n", dir, cp);
1816 		err = -errno;
1817 	}
1818 	free(dname);
1819 
1820 	if (!err && st_fs.f_type != BPF_FS_MAGIC) {
1821 		pr_warning("specified path %s is not on BPF FS\n", path);
1822 		err = -EINVAL;
1823 	}
1824 
1825 	return err;
1826 }
1827 
1828 int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
1829 			      int instance)
1830 {
1831 	char *cp, errmsg[STRERR_BUFSIZE];
1832 	int err;
1833 
1834 	err = check_path(path);
1835 	if (err)
1836 		return err;
1837 
1838 	if (prog == NULL) {
1839 		pr_warning("invalid program pointer\n");
1840 		return -EINVAL;
1841 	}
1842 
1843 	if (instance < 0 || instance >= prog->instances.nr) {
1844 		pr_warning("invalid prog instance %d of prog %s (max %d)\n",
1845 			   instance, prog->section_name, prog->instances.nr);
1846 		return -EINVAL;
1847 	}
1848 
1849 	if (bpf_obj_pin(prog->instances.fds[instance], path)) {
1850 		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1851 		pr_warning("failed to pin program: %s\n", cp);
1852 		return -errno;
1853 	}
1854 	pr_debug("pinned program '%s'\n", path);
1855 
1856 	return 0;
1857 }
1858 
1859 int bpf_program__unpin_instance(struct bpf_program *prog, const char *path,
1860 				int instance)
1861 {
1862 	int err;
1863 
1864 	err = check_path(path);
1865 	if (err)
1866 		return err;
1867 
1868 	if (prog == NULL) {
1869 		pr_warning("invalid program pointer\n");
1870 		return -EINVAL;
1871 	}
1872 
1873 	if (instance < 0 || instance >= prog->instances.nr) {
1874 		pr_warning("invalid prog instance %d of prog %s (max %d)\n",
1875 			   instance, prog->section_name, prog->instances.nr);
1876 		return -EINVAL;
1877 	}
1878 
1879 	err = unlink(path);
1880 	if (err != 0)
1881 		return -errno;
1882 	pr_debug("unpinned program '%s'\n", path);
1883 
1884 	return 0;
1885 }
1886 
1887 static int make_dir(const char *path)
1888 {
1889 	char *cp, errmsg[STRERR_BUFSIZE];
1890 	int err = 0;
1891 
1892 	if (mkdir(path, 0700) && errno != EEXIST)
1893 		err = -errno;
1894 
1895 	if (err) {
1896 		cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
1897 		pr_warning("failed to mkdir %s: %s\n", path, cp);
1898 	}
1899 	return err;
1900 }
1901 
/* Pin @prog under @path. A single instance is pinned directly at @path;
 * multiple instances get a directory with one entry per instance index.
 * On failure, already-pinned instances are unpinned and the directory is
 * removed.
 */
int bpf_program__pin(struct bpf_program *prog, const char *path)
{
	int i, err;

	err = check_path(path);
	if (err)
		return err;

	if (prog == NULL) {
		pr_warning("invalid program pointer\n");
		return -EINVAL;
	}

	if (prog->instances.nr <= 0) {
		pr_warning("no instances of prog %s to pin\n",
			   prog->section_name);
		return -EINVAL;
	}

	if (prog->instances.nr == 1) {
		/* don't create subdirs when pinning single instance */
		return bpf_program__pin_instance(prog, path, 0);
	}

	err = make_dir(path);
	if (err)
		return err;

	/* instance i is pinned as <path>/<i> */
	for (i = 0; i < prog->instances.nr; i++) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
		if (len < 0) {
			err = -EINVAL;
			goto err_unpin;
		} else if (len >= PATH_MAX) {
			err = -ENAMETOOLONG;
			goto err_unpin;
		}

		err = bpf_program__pin_instance(prog, buf, i);
		if (err)
			goto err_unpin;
	}

	return 0;

err_unpin:
	/* roll back the instances pinned so far, then the directory */
	for (i = i - 1; i >= 0; i--) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
		if (len < 0)
			continue;
		else if (len >= PATH_MAX)
			continue;

		bpf_program__unpin_instance(prog, buf, i);
	}

	rmdir(path);

	return err;
}
1968 
1969 int bpf_program__unpin(struct bpf_program *prog, const char *path)
1970 {
1971 	int i, err;
1972 
1973 	err = check_path(path);
1974 	if (err)
1975 		return err;
1976 
1977 	if (prog == NULL) {
1978 		pr_warning("invalid program pointer\n");
1979 		return -EINVAL;
1980 	}
1981 
1982 	if (prog->instances.nr <= 0) {
1983 		pr_warning("no instances of prog %s to pin\n",
1984 			   prog->section_name);
1985 		return -EINVAL;
1986 	}
1987 
1988 	if (prog->instances.nr == 1) {
1989 		/* don't create subdirs when pinning single instance */
1990 		return bpf_program__unpin_instance(prog, path, 0);
1991 	}
1992 
1993 	for (i = 0; i < prog->instances.nr; i++) {
1994 		char buf[PATH_MAX];
1995 		int len;
1996 
1997 		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
1998 		if (len < 0)
1999 			return -EINVAL;
2000 		else if (len >= PATH_MAX)
2001 			return -ENAMETOOLONG;
2002 
2003 		err = bpf_program__unpin_instance(prog, buf, i);
2004 		if (err)
2005 			return err;
2006 	}
2007 
2008 	err = rmdir(path);
2009 	if (err)
2010 		return -errno;
2011 
2012 	return 0;
2013 }
2014 
2015 int bpf_map__pin(struct bpf_map *map, const char *path)
2016 {
2017 	char *cp, errmsg[STRERR_BUFSIZE];
2018 	int err;
2019 
2020 	err = check_path(path);
2021 	if (err)
2022 		return err;
2023 
2024 	if (map == NULL) {
2025 		pr_warning("invalid map pointer\n");
2026 		return -EINVAL;
2027 	}
2028 
2029 	if (bpf_obj_pin(map->fd, path)) {
2030 		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
2031 		pr_warning("failed to pin map: %s\n", cp);
2032 		return -errno;
2033 	}
2034 
2035 	pr_debug("pinned map '%s'\n", path);
2036 
2037 	return 0;
2038 }
2039 
/* Remove the pin of @map at @path. */
int bpf_map__unpin(struct bpf_map *map, const char *path)
{
	int err = check_path(path);

	if (err)
		return err;

	if (!map) {
		pr_warning("invalid map pointer\n");
		return -EINVAL;
	}

	if (unlink(path))
		return -errno;
	pr_debug("unpinned map '%s'\n", path);

	return 0;
}
2060 
/* Pin every map of @obj as <path>/<map name>. The object must already be
 * loaded. On failure, maps pinned so far are unpinned again.
 */
int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
{
	struct bpf_map *map;
	int err;

	if (!obj)
		return -ENOENT;

	if (!obj->loaded) {
		pr_warning("object not yet loaded; load it first\n");
		return -ENOENT;
	}

	err = make_dir(path);
	if (err)
		return err;

	bpf_map__for_each(map, obj) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       bpf_map__name(map));
		if (len < 0) {
			err = -EINVAL;
			goto err_unpin_maps;
		} else if (len >= PATH_MAX) {
			err = -ENAMETOOLONG;
			goto err_unpin_maps;
		}

		err = bpf_map__pin(map, buf);
		if (err)
			goto err_unpin_maps;
	}

	return 0;

err_unpin_maps:
	/* walk backwards from the failing map, unpinning what was pinned */
	while ((map = bpf_map__prev(map, obj))) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       bpf_map__name(map));
		if (len < 0)
			continue;
		else if (len >= PATH_MAX)
			continue;

		bpf_map__unpin(map, buf);
	}

	return err;
}
2116 
2117 int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
2118 {
2119 	struct bpf_map *map;
2120 	int err;
2121 
2122 	if (!obj)
2123 		return -ENOENT;
2124 
2125 	bpf_map__for_each(map, obj) {
2126 		char buf[PATH_MAX];
2127 		int len;
2128 
2129 		len = snprintf(buf, PATH_MAX, "%s/%s", path,
2130 			       bpf_map__name(map));
2131 		if (len < 0)
2132 			return -EINVAL;
2133 		else if (len >= PATH_MAX)
2134 			return -ENAMETOOLONG;
2135 
2136 		err = bpf_map__unpin(map, buf);
2137 		if (err)
2138 			return err;
2139 	}
2140 
2141 	return 0;
2142 }
2143 
/* Pin every program of @obj as <path>/<pin_name>. The object must already
 * be loaded. On failure, programs pinned so far are unpinned again.
 */
int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
{
	struct bpf_program *prog;
	int err;

	if (!obj)
		return -ENOENT;

	if (!obj->loaded) {
		pr_warning("object not yet loaded; load it first\n");
		return -ENOENT;
	}

	err = make_dir(path);
	if (err)
		return err;

	bpf_object__for_each_program(prog, obj) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       prog->pin_name);
		if (len < 0) {
			err = -EINVAL;
			goto err_unpin_programs;
		} else if (len >= PATH_MAX) {
			err = -ENAMETOOLONG;
			goto err_unpin_programs;
		}

		err = bpf_program__pin(prog, buf);
		if (err)
			goto err_unpin_programs;
	}

	return 0;

err_unpin_programs:
	/* walk backwards from the failing program, undoing earlier pins */
	while ((prog = bpf_program__prev(prog, obj))) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       prog->pin_name);
		if (len < 0)
			continue;
		else if (len >= PATH_MAX)
			continue;

		bpf_program__unpin(prog, buf);
	}

	return err;
}
2199 
2200 int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
2201 {
2202 	struct bpf_program *prog;
2203 	int err;
2204 
2205 	if (!obj)
2206 		return -ENOENT;
2207 
2208 	bpf_object__for_each_program(prog, obj) {
2209 		char buf[PATH_MAX];
2210 		int len;
2211 
2212 		len = snprintf(buf, PATH_MAX, "%s/%s", path,
2213 			       prog->pin_name);
2214 		if (len < 0)
2215 			return -EINVAL;
2216 		else if (len >= PATH_MAX)
2217 			return -ENAMETOOLONG;
2218 
2219 		err = bpf_program__unpin(prog, buf);
2220 		if (err)
2221 			return err;
2222 	}
2223 
2224 	return 0;
2225 }
2226 
/* Pin all maps and programs of @obj under @path, all-or-nothing. */
int bpf_object__pin(struct bpf_object *obj, const char *path)
{
	int err;

	err = bpf_object__pin_maps(obj, path);
	if (err)
		return err;

	err = bpf_object__pin_programs(obj, path);
	if (!err)
		return 0;

	/* roll back the maps pinned above so nothing is left half-done */
	bpf_object__unpin_maps(obj, path);
	return err;
}
2243 
/* Destroy @obj: run private-data destructors, release ELF state, unload
 * kernel-side maps/programs, free BTF data and all per-map/per-program
 * allocations, and unlink the object from the global list.
 */
void bpf_object__close(struct bpf_object *obj)
{
	size_t i;

	if (!obj)
		return;

	/* let the owner release its attached private data first */
	if (obj->clear_priv)
		obj->clear_priv(obj, obj->priv);

	bpf_object__elf_finish(obj);
	bpf_object__unload(obj);
	btf__free(obj->btf);
	btf_ext__free(obj->btf_ext);

	for (i = 0; i < obj->nr_maps; i++) {
		zfree(&obj->maps[i].name);
		/* per-map private data has its own destructor callback */
		if (obj->maps[i].clear_priv)
			obj->maps[i].clear_priv(&obj->maps[i],
						obj->maps[i].priv);
		obj->maps[i].priv = NULL;
		obj->maps[i].clear_priv = NULL;
	}
	zfree(&obj->maps);
	obj->nr_maps = 0;

	if (obj->programs && obj->nr_programs) {
		for (i = 0; i < obj->nr_programs; i++)
			bpf_program__exit(&obj->programs[i]);
	}
	zfree(&obj->programs);

	/* remove from the global object list before freeing */
	list_del(&obj->list);
	free(obj);
}
2279 
/* Iterate over all open objects: a NULL @prev starts at the head of the
 * global list; returns NULL when the list is exhausted (or empty).
 */
struct bpf_object *
bpf_object__next(struct bpf_object *prev)
{
	struct bpf_object *next;

	if (!prev)
		next = list_first_entry(&bpf_objects_list,
					struct bpf_object,
					list);
	else
		next = list_next_entry(prev, list);

	/* Empty list is noticed here so don't need checking on entry. */
	if (&next->list == &bpf_objects_list)
		return NULL;

	return next;
}
2298 
2299 const char *bpf_object__name(struct bpf_object *obj)
2300 {
2301 	return obj ? obj->path : ERR_PTR(-EINVAL);
2302 }
2303 
2304 unsigned int bpf_object__kversion(struct bpf_object *obj)
2305 {
2306 	return obj ? obj->kern_version : 0;
2307 }
2308 
2309 int bpf_object__btf_fd(const struct bpf_object *obj)
2310 {
2311 	return obj->btf ? btf__fd(obj->btf) : -1;
2312 }
2313 
2314 int bpf_object__set_priv(struct bpf_object *obj, void *priv,
2315 			 bpf_object_clear_priv_t clear_priv)
2316 {
2317 	if (obj->priv && obj->clear_priv)
2318 		obj->clear_priv(obj, obj->priv);
2319 
2320 	obj->priv = priv;
2321 	obj->clear_priv = clear_priv;
2322 	return 0;
2323 }
2324 
2325 void *bpf_object__priv(struct bpf_object *obj)
2326 {
2327 	return obj ? obj->priv : ERR_PTR(-EINVAL);
2328 }
2329 
/* Step one program forward or backward within @obj's array; a NULL @p
 * starts at the first (forward) or last (backward) program. Returns NULL
 * past either end or on a handle/object mismatch.
 */
static struct bpf_program *
__bpf_program__iter(struct bpf_program *p, struct bpf_object *obj, bool forward)
{
	size_t nr_programs = obj->nr_programs;
	ssize_t idx;

	if (!nr_programs)
		return NULL;

	if (!p)
		/* Iter from the beginning */
		return forward ? &obj->programs[0] :
			&obj->programs[nr_programs - 1];

	if (p->obj != obj) {
		pr_warning("error: program handler doesn't match object\n");
		return NULL;
	}

	/* step one slot; note that if nr_programs is unsigned, a negative
	 * idx is converted for the first comparison and trips it too, so
	 * the explicit `idx < 0` test is belt-and-braces
	 */
	idx = (p - obj->programs) + (forward ? 1 : -1);
	if (idx >= obj->nr_programs || idx < 0)
		return NULL;
	return &obj->programs[idx];
}
2354 
2355 struct bpf_program *
2356 bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
2357 {
2358 	struct bpf_program *prog = prev;
2359 
2360 	do {
2361 		prog = __bpf_program__iter(prog, obj, true);
2362 	} while (prog && bpf_program__is_function_storage(prog, obj));
2363 
2364 	return prog;
2365 }
2366 
2367 struct bpf_program *
2368 bpf_program__prev(struct bpf_program *next, struct bpf_object *obj)
2369 {
2370 	struct bpf_program *prog = next;
2371 
2372 	do {
2373 		prog = __bpf_program__iter(prog, obj, false);
2374 	} while (prog && bpf_program__is_function_storage(prog, obj));
2375 
2376 	return prog;
2377 }
2378 
2379 int bpf_program__set_priv(struct bpf_program *prog, void *priv,
2380 			  bpf_program_clear_priv_t clear_priv)
2381 {
2382 	if (prog->priv && prog->clear_priv)
2383 		prog->clear_priv(prog, prog->priv);
2384 
2385 	prog->priv = priv;
2386 	prog->clear_priv = clear_priv;
2387 	return 0;
2388 }
2389 
2390 void *bpf_program__priv(struct bpf_program *prog)
2391 {
2392 	return prog ? prog->priv : ERR_PTR(-EINVAL);
2393 }
2394 
/* Record the netdev ifindex used when loading this program (offload).
 * NOTE(review): @prog is dereferenced unconditionally — callers must
 * pass a valid handle.
 */
void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
{
	prog->prog_ifindex = ifindex;
}
2399 
2400 const char *bpf_program__title(struct bpf_program *prog, bool needs_copy)
2401 {
2402 	const char *title;
2403 
2404 	title = prog->section_name;
2405 	if (needs_copy) {
2406 		title = strdup(title);
2407 		if (!title) {
2408 			pr_warning("failed to strdup program title\n");
2409 			return ERR_PTR(-ENOMEM);
2410 		}
2411 	}
2412 
2413 	return title;
2414 }
2415 
/* Convenience wrapper: fd of the program's first (0th) instance. */
int bpf_program__fd(struct bpf_program *prog)
{
	return bpf_program__nth_fd(prog, 0);
}
2420 
2421 int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
2422 			  bpf_program_prep_t prep)
2423 {
2424 	int *instances_fds;
2425 
2426 	if (nr_instances <= 0 || !prep)
2427 		return -EINVAL;
2428 
2429 	if (prog->instances.nr > 0 || prog->instances.fds) {
2430 		pr_warning("Can't set pre-processor after loading\n");
2431 		return -EINVAL;
2432 	}
2433 
2434 	instances_fds = malloc(sizeof(int) * nr_instances);
2435 	if (!instances_fds) {
2436 		pr_warning("alloc memory failed for fds\n");
2437 		return -ENOMEM;
2438 	}
2439 
2440 	/* fill all fd with -1 */
2441 	memset(instances_fds, -1, sizeof(int) * nr_instances);
2442 
2443 	prog->instances.nr = nr_instances;
2444 	prog->instances.fds = instances_fds;
2445 	prog->preprocessor = prep;
2446 	return 0;
2447 }
2448 
2449 int bpf_program__nth_fd(struct bpf_program *prog, int n)
2450 {
2451 	int fd;
2452 
2453 	if (!prog)
2454 		return -EINVAL;
2455 
2456 	if (n >= prog->instances.nr || n < 0) {
2457 		pr_warning("Can't get the %dth fd from program %s: only %d instances\n",
2458 			   n, prog->section_name, prog->instances.nr);
2459 		return -EINVAL;
2460 	}
2461 
2462 	fd = prog->instances.fds[n];
2463 	if (fd < 0) {
2464 		pr_warning("%dth instance of program '%s' is invalid\n",
2465 			   n, prog->section_name);
2466 		return -ENOENT;
2467 	}
2468 
2469 	return fd;
2470 }
2471 
/* Force the BPF program type used at load time.
 * NOTE(review): @prog is dereferenced unconditionally — callers must
 * pass a valid handle.
 */
void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
{
	prog->type = type;
}
2476 
2477 static bool bpf_program__is_type(struct bpf_program *prog,
2478 				 enum bpf_prog_type type)
2479 {
2480 	return prog ? (prog->type == type) : false;
2481 }
2482 
/* Generate the public typed wrappers bpf_program__set_<NAME>() and
 * bpf_program__is_<NAME>() around bpf_program__set_type() /
 * bpf_program__is_type() for one program type.
 */
#define BPF_PROG_TYPE_FNS(NAME, TYPE)			\
int bpf_program__set_##NAME(struct bpf_program *prog)	\
{							\
	if (!prog)					\
		return -EINVAL;				\
	bpf_program__set_type(prog, TYPE);		\
	return 0;					\
}							\
							\
bool bpf_program__is_##NAME(struct bpf_program *prog)	\
{							\
	return bpf_program__is_type(prog, TYPE);	\
}							\

/* One setter/predicate pair per supported program type. */
BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
2505 
/* Set the attach type passed as expected_attach_type at load time.
 * NOTE(review): @prog is dereferenced unconditionally — callers must
 * pass a valid handle.
 */
void bpf_program__set_expected_attach_type(struct bpf_program *prog,
					   enum bpf_attach_type type)
{
	prog->expected_attach_type = type;
}
2511 
/* Initializer helpers for one section_names[] entry; field order is
 * { sec, len, prog_type, expected_attach_type, is_attachable,
 *   attach_type }.  len excludes the NUL so matching is by prefix.
 */
#define BPF_PROG_SEC_IMPL(string, ptype, eatype, is_attachable, atype) \
	{ string, sizeof(string) - 1, ptype, eatype, is_attachable, atype }

/* Programs that can NOT be attached. */
#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0)

/* Programs that can be attached. */
#define BPF_APROG_SEC(string, ptype, atype) \
	BPF_PROG_SEC_IMPL(string, ptype, 0, 1, atype)

/* Programs that must specify expected attach type at load time. */
#define BPF_EAPROG_SEC(string, ptype, eatype) \
	BPF_PROG_SEC_IMPL(string, ptype, eatype, 1, eatype)

/* Programs that can be attached but attach type can't be identified by section
 * name. Kept for backward compatibility.
 */
#define BPF_APROG_COMPAT(string, ptype) BPF_PROG_SEC(string, ptype)
2530 
/* Table mapping ELF section-name prefixes to BPF program/attach types.
 * libbpf_prog_type_by_name() matches by prefix (strncmp with the
 * stored len), first hit wins — so more specific entries (e.g.
 * "sk_skb/stream_parser") must precede shorter prefixes ("sk_skb").
 */
static const struct {
	const char *sec;
	size_t len;
	enum bpf_prog_type prog_type;
	enum bpf_attach_type expected_attach_type;
	int is_attachable;
	enum bpf_attach_type attach_type;
} section_names[] = {
	BPF_PROG_SEC("socket",			BPF_PROG_TYPE_SOCKET_FILTER),
	BPF_PROG_SEC("kprobe/",			BPF_PROG_TYPE_KPROBE),
	BPF_PROG_SEC("kretprobe/",		BPF_PROG_TYPE_KPROBE),
	BPF_PROG_SEC("classifier",		BPF_PROG_TYPE_SCHED_CLS),
	BPF_PROG_SEC("action",			BPF_PROG_TYPE_SCHED_ACT),
	BPF_PROG_SEC("tracepoint/",		BPF_PROG_TYPE_TRACEPOINT),
	BPF_PROG_SEC("raw_tracepoint/",		BPF_PROG_TYPE_RAW_TRACEPOINT),
	BPF_PROG_SEC("xdp",			BPF_PROG_TYPE_XDP),
	BPF_PROG_SEC("perf_event",		BPF_PROG_TYPE_PERF_EVENT),
	BPF_PROG_SEC("lwt_in",			BPF_PROG_TYPE_LWT_IN),
	BPF_PROG_SEC("lwt_out",			BPF_PROG_TYPE_LWT_OUT),
	BPF_PROG_SEC("lwt_xmit",		BPF_PROG_TYPE_LWT_XMIT),
	BPF_PROG_SEC("lwt_seg6local",		BPF_PROG_TYPE_LWT_SEG6LOCAL),
	BPF_APROG_SEC("cgroup_skb/ingress",	BPF_PROG_TYPE_CGROUP_SKB,
						BPF_CGROUP_INET_INGRESS),
	BPF_APROG_SEC("cgroup_skb/egress",	BPF_PROG_TYPE_CGROUP_SKB,
						BPF_CGROUP_INET_EGRESS),
	BPF_APROG_COMPAT("cgroup/skb",		BPF_PROG_TYPE_CGROUP_SKB),
	BPF_APROG_SEC("cgroup/sock",		BPF_PROG_TYPE_CGROUP_SOCK,
						BPF_CGROUP_INET_SOCK_CREATE),
	BPF_EAPROG_SEC("cgroup/post_bind4",	BPF_PROG_TYPE_CGROUP_SOCK,
						BPF_CGROUP_INET4_POST_BIND),
	BPF_EAPROG_SEC("cgroup/post_bind6",	BPF_PROG_TYPE_CGROUP_SOCK,
						BPF_CGROUP_INET6_POST_BIND),
	BPF_APROG_SEC("cgroup/dev",		BPF_PROG_TYPE_CGROUP_DEVICE,
						BPF_CGROUP_DEVICE),
	BPF_APROG_SEC("sockops",		BPF_PROG_TYPE_SOCK_OPS,
						BPF_CGROUP_SOCK_OPS),
	BPF_APROG_SEC("sk_skb/stream_parser",	BPF_PROG_TYPE_SK_SKB,
						BPF_SK_SKB_STREAM_PARSER),
	BPF_APROG_SEC("sk_skb/stream_verdict",	BPF_PROG_TYPE_SK_SKB,
						BPF_SK_SKB_STREAM_VERDICT),
	BPF_APROG_COMPAT("sk_skb",		BPF_PROG_TYPE_SK_SKB),
	BPF_APROG_SEC("sk_msg",			BPF_PROG_TYPE_SK_MSG,
						BPF_SK_MSG_VERDICT),
	BPF_APROG_SEC("lirc_mode2",		BPF_PROG_TYPE_LIRC_MODE2,
						BPF_LIRC_MODE2),
	BPF_APROG_SEC("flow_dissector",		BPF_PROG_TYPE_FLOW_DISSECTOR,
						BPF_FLOW_DISSECTOR),
	BPF_EAPROG_SEC("cgroup/bind4",		BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_INET4_BIND),
	BPF_EAPROG_SEC("cgroup/bind6",		BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_INET6_BIND),
	BPF_EAPROG_SEC("cgroup/connect4",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_INET4_CONNECT),
	BPF_EAPROG_SEC("cgroup/connect6",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_INET6_CONNECT),
	BPF_EAPROG_SEC("cgroup/sendmsg4",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_UDP4_SENDMSG),
	BPF_EAPROG_SEC("cgroup/sendmsg6",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_UDP6_SENDMSG),
};

/* The helper macros are only meaningful for this table. */
#undef BPF_PROG_SEC_IMPL
#undef BPF_PROG_SEC
#undef BPF_APROG_SEC
#undef BPF_EAPROG_SEC
#undef BPF_APROG_COMPAT
2597 
2598 int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
2599 			     enum bpf_attach_type *expected_attach_type)
2600 {
2601 	int i;
2602 
2603 	if (!name)
2604 		return -EINVAL;
2605 
2606 	for (i = 0; i < ARRAY_SIZE(section_names); i++) {
2607 		if (strncmp(name, section_names[i].sec, section_names[i].len))
2608 			continue;
2609 		*prog_type = section_names[i].prog_type;
2610 		*expected_attach_type = section_names[i].expected_attach_type;
2611 		return 0;
2612 	}
2613 	return -EINVAL;
2614 }
2615 
2616 int libbpf_attach_type_by_name(const char *name,
2617 			       enum bpf_attach_type *attach_type)
2618 {
2619 	int i;
2620 
2621 	if (!name)
2622 		return -EINVAL;
2623 
2624 	for (i = 0; i < ARRAY_SIZE(section_names); i++) {
2625 		if (strncmp(name, section_names[i].sec, section_names[i].len))
2626 			continue;
2627 		if (!section_names[i].is_attachable)
2628 			return -EINVAL;
2629 		*attach_type = section_names[i].attach_type;
2630 		return 0;
2631 	}
2632 	return -EINVAL;
2633 }
2634 
/* Thin wrapper: derive prog/attach type from @prog's own section name. */
static int
bpf_program__identify_section(struct bpf_program *prog,
			      enum bpf_prog_type *prog_type,
			      enum bpf_attach_type *expected_attach_type)
{
	return libbpf_prog_type_by_name(prog->section_name, prog_type,
					expected_attach_type);
}
2643 
2644 int bpf_map__fd(struct bpf_map *map)
2645 {
2646 	return map ? map->fd : -EINVAL;
2647 }
2648 
2649 const struct bpf_map_def *bpf_map__def(struct bpf_map *map)
2650 {
2651 	return map ? &map->def : ERR_PTR(-EINVAL);
2652 }
2653 
2654 const char *bpf_map__name(struct bpf_map *map)
2655 {
2656 	return map ? map->name : NULL;
2657 }
2658 
2659 __u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
2660 {
2661 	return map ? map->btf_key_type_id : 0;
2662 }
2663 
2664 __u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
2665 {
2666 	return map ? map->btf_value_type_id : 0;
2667 }
2668 
2669 int bpf_map__set_priv(struct bpf_map *map, void *priv,
2670 		     bpf_map_clear_priv_t clear_priv)
2671 {
2672 	if (!map)
2673 		return -EINVAL;
2674 
2675 	if (map->priv) {
2676 		if (map->clear_priv)
2677 			map->clear_priv(map, map->priv);
2678 	}
2679 
2680 	map->priv = priv;
2681 	map->clear_priv = clear_priv;
2682 	return 0;
2683 }
2684 
2685 void *bpf_map__priv(struct bpf_map *map)
2686 {
2687 	return map ? map->priv : ERR_PTR(-EINVAL);
2688 }
2689 
/* True for map types that should not inherit an offload ifindex;
 * bpf_prog_load_xattr() uses this to skip setting map_ifindex on
 * perf event arrays.
 */
bool bpf_map__is_offload_neutral(struct bpf_map *map)
{
	return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}
2694 
/* Record the netdev ifindex used when creating this map (offload).
 * NOTE(review): @map is dereferenced unconditionally — callers must
 * pass a valid handle.
 */
void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
{
	map->map_ifindex = ifindex;
}
2699 
/* Set the inner-map template fd for a map-in-map.  Only valid for
 * map-in-map types and only before an fd has been set; returns
 * -EINVAL otherwise.
 */
int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
{
	if (!bpf_map_type__is_map_in_map(map->def.type)) {
		pr_warning("error: unsupported map type\n");
		return -EINVAL;
	}
	/* The template may be specified exactly once. */
	if (map->inner_map_fd != -1) {
		pr_warning("error: inner_map_fd already specified\n");
		return -EINVAL;
	}
	map->inner_map_fd = fd;
	return 0;
}
2713 
2714 static struct bpf_map *
2715 __bpf_map__iter(struct bpf_map *m, struct bpf_object *obj, int i)
2716 {
2717 	ssize_t idx;
2718 	struct bpf_map *s, *e;
2719 
2720 	if (!obj || !obj->maps)
2721 		return NULL;
2722 
2723 	s = obj->maps;
2724 	e = obj->maps + obj->nr_maps;
2725 
2726 	if ((m < s) || (m >= e)) {
2727 		pr_warning("error in %s: map handler doesn't belong to object\n",
2728 			   __func__);
2729 		return NULL;
2730 	}
2731 
2732 	idx = (m - obj->maps) + i;
2733 	if (idx >= obj->nr_maps || idx < 0)
2734 		return NULL;
2735 	return &obj->maps[idx];
2736 }
2737 
2738 struct bpf_map *
2739 bpf_map__next(struct bpf_map *prev, struct bpf_object *obj)
2740 {
2741 	if (prev == NULL)
2742 		return obj->maps;
2743 
2744 	return __bpf_map__iter(prev, obj, 1);
2745 }
2746 
2747 struct bpf_map *
2748 bpf_map__prev(struct bpf_map *next, struct bpf_object *obj)
2749 {
2750 	if (next == NULL) {
2751 		if (!obj->nr_maps)
2752 			return NULL;
2753 		return obj->maps + obj->nr_maps - 1;
2754 	}
2755 
2756 	return __bpf_map__iter(next, obj, -1);
2757 }
2758 
2759 struct bpf_map *
2760 bpf_object__find_map_by_name(struct bpf_object *obj, const char *name)
2761 {
2762 	struct bpf_map *pos;
2763 
2764 	bpf_map__for_each(pos, obj) {
2765 		if (pos->name && !strcmp(pos->name, name))
2766 			return pos;
2767 	}
2768 	return NULL;
2769 }
2770 
2771 struct bpf_map *
2772 bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
2773 {
2774 	int i;
2775 
2776 	for (i = 0; i < obj->nr_maps; i++) {
2777 		if (obj->maps[i].offset == offset)
2778 			return &obj->maps[i];
2779 	}
2780 	return ERR_PTR(-ENOENT);
2781 }
2782 
/* Decode a possibly-error-encoding pointer (ERR_PTR convention):
 * negative errno for error pointers, 0 for valid ones.
 */
long libbpf_get_error(const void *ptr)
{
	return IS_ERR(ptr) ? PTR_ERR(ptr) : 0;
}
2789 
2790 int bpf_prog_load(const char *file, enum bpf_prog_type type,
2791 		  struct bpf_object **pobj, int *prog_fd)
2792 {
2793 	struct bpf_prog_load_attr attr;
2794 
2795 	memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
2796 	attr.file = file;
2797 	attr.prog_type = type;
2798 	attr.expected_attach_type = 0;
2799 
2800 	return bpf_prog_load_xattr(&attr, pobj, prog_fd);
2801 }
2802 
2803 int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
2804 			struct bpf_object **pobj, int *prog_fd)
2805 {
2806 	struct bpf_object_open_attr open_attr = {
2807 		.file		= attr->file,
2808 		.prog_type	= attr->prog_type,
2809 	};
2810 	struct bpf_program *prog, *first_prog = NULL;
2811 	enum bpf_attach_type expected_attach_type;
2812 	enum bpf_prog_type prog_type;
2813 	struct bpf_object *obj;
2814 	struct bpf_map *map;
2815 	int err;
2816 
2817 	if (!attr)
2818 		return -EINVAL;
2819 	if (!attr->file)
2820 		return -EINVAL;
2821 
2822 	obj = bpf_object__open_xattr(&open_attr);
2823 	if (IS_ERR_OR_NULL(obj))
2824 		return -ENOENT;
2825 
2826 	bpf_object__for_each_program(prog, obj) {
2827 		/*
2828 		 * If type is not specified, try to guess it based on
2829 		 * section name.
2830 		 */
2831 		prog_type = attr->prog_type;
2832 		prog->prog_ifindex = attr->ifindex;
2833 		expected_attach_type = attr->expected_attach_type;
2834 		if (prog_type == BPF_PROG_TYPE_UNSPEC) {
2835 			err = bpf_program__identify_section(prog, &prog_type,
2836 							    &expected_attach_type);
2837 			if (err < 0) {
2838 				pr_warning("failed to guess program type based on section name %s\n",
2839 					   prog->section_name);
2840 				bpf_object__close(obj);
2841 				return -EINVAL;
2842 			}
2843 		}
2844 
2845 		bpf_program__set_type(prog, prog_type);
2846 		bpf_program__set_expected_attach_type(prog,
2847 						      expected_attach_type);
2848 
2849 		if (!first_prog)
2850 			first_prog = prog;
2851 	}
2852 
2853 	bpf_map__for_each(map, obj) {
2854 		if (!bpf_map__is_offload_neutral(map))
2855 			map->map_ifindex = attr->ifindex;
2856 	}
2857 
2858 	if (!first_prog) {
2859 		pr_warning("object file doesn't contain bpf program\n");
2860 		bpf_object__close(obj);
2861 		return -ENOENT;
2862 	}
2863 
2864 	err = bpf_object__load(obj);
2865 	if (err) {
2866 		bpf_object__close(obj);
2867 		return -EINVAL;
2868 	}
2869 
2870 	*pobj = obj;
2871 	*prog_fd = bpf_program__fd(first_prog);
2872 	return 0;
2873 }
2874 
/* Drain a perf event mmap ring buffer, invoking @fn for each record.
 *
 * @mmap_mem points at the perf_event_mmap_page header; the data area
 * starts one @page_size after it and is @mmap_size bytes long.
 * NOTE(review): the `data_tail & (mmap_size - 1)` wrap assumes
 * mmap_size is a power of two — confirm at the call sites.
 *
 * *copy_mem / *copy_size form a caller-owned scratch buffer (grown
 * with malloc/free here) used to linearize records that wrap around
 * the end of the ring.  Returns the last value produced by @fn, or
 * LIBBPF_PERF_EVENT_ERROR on allocation failure.
 */
enum bpf_perf_event_ret
bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
			   void **copy_mem, size_t *copy_size,
			   bpf_perf_event_print_t fn, void *private_data)
{
	struct perf_event_mmap_page *header = mmap_mem;
	__u64 data_head = ring_buffer_read_head(header);
	__u64 data_tail = header->data_tail;
	void *base = ((__u8 *)header) + page_size;
	int ret = LIBBPF_PERF_EVENT_CONT;
	struct perf_event_header *ehdr;
	size_t ehdr_size;

	/* Consume records from tail to head (kernel is the producer). */
	while (data_head != data_tail) {
		ehdr = base + (data_tail & (mmap_size - 1));
		ehdr_size = ehdr->size;

		/* Record wraps past the end of the ring: linearize it
		 * into the scratch buffer before handing it to @fn.
		 */
		if (((void *)ehdr) + ehdr_size > base + mmap_size) {
			void *copy_start = ehdr;
			size_t len_first = base + mmap_size - copy_start;
			size_t len_secnd = ehdr_size - len_first;

			/* Grow the scratch buffer if it is too small. */
			if (*copy_size < ehdr_size) {
				free(*copy_mem);
				*copy_mem = malloc(ehdr_size);
				if (!*copy_mem) {
					*copy_size = 0;
					ret = LIBBPF_PERF_EVENT_ERROR;
					break;
				}
				*copy_size = ehdr_size;
			}

			memcpy(*copy_mem, copy_start, len_first);
			memcpy(*copy_mem + len_first, base, len_secnd);
			ehdr = *copy_mem;
		}

		ret = fn(ehdr, private_data);
		data_tail += ehdr_size;
		if (ret != LIBBPF_PERF_EVENT_CONT)
			break;
	}

	/* Publish the new tail so the kernel can reuse the space. */
	ring_buffer_write_tail(header, data_tail);
	return ret;
}
2922