xref: /openbmc/linux/tools/lib/bpf/libbpf.c (revision 69fad28c)
1 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
2 
3 /*
4  * Common eBPF ELF object loading operations.
5  *
6  * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
7  * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
8  * Copyright (C) 2015 Huawei Inc.
9  * Copyright (C) 2017 Nicira, Inc.
10  */
11 
12 #ifndef _GNU_SOURCE
13 #define _GNU_SOURCE
14 #endif
15 #include <stdlib.h>
16 #include <stdio.h>
17 #include <stdarg.h>
18 #include <libgen.h>
19 #include <inttypes.h>
20 #include <string.h>
21 #include <unistd.h>
22 #include <fcntl.h>
23 #include <errno.h>
24 #include <asm/unistd.h>
25 #include <linux/err.h>
26 #include <linux/kernel.h>
27 #include <linux/bpf.h>
28 #include <linux/btf.h>
29 #include <linux/filter.h>
30 #include <linux/list.h>
31 #include <linux/limits.h>
32 #include <linux/perf_event.h>
33 #include <linux/ring_buffer.h>
34 #include <sys/stat.h>
35 #include <sys/types.h>
36 #include <sys/vfs.h>
37 #include <tools/libc_compat.h>
38 #include <libelf.h>
39 #include <gelf.h>
40 
41 #include "libbpf.h"
42 #include "bpf.h"
43 #include "btf.h"
44 #include "str_error.h"
45 
/* EM_BPF may be absent from older system elf.h headers */
#ifndef EM_BPF
#define EM_BPF 247
#endif

/* Magic number of the BPF filesystem (bpffs), used for pin-path checks */
#ifndef BPF_FS_MAGIC
#define BPF_FS_MAGIC		0xcafe4a11
#endif

/* Ask the compiler to type-check printf-style format strings */
#define __printf(a, b)	__attribute__((format(printf, a, b)))
55 
56 __printf(1, 2)
57 static int __base_pr(const char *format, ...)
58 {
59 	va_list args;
60 	int err;
61 
62 	va_start(args, format);
63 	err = vfprintf(stderr, format, args);
64 	va_end(args);
65 	return err;
66 }
67 
/* Per-level print callbacks; a NULL pointer disables that level */
static __printf(1, 2) libbpf_print_fn_t __pr_warning = __base_pr;
static __printf(1, 2) libbpf_print_fn_t __pr_info = __base_pr;
static __printf(1, 2) libbpf_print_fn_t __pr_debug;	/* debug output is off by default */

/* Emit via @func if it is set, prefixing every message with "libbpf: " */
#define __pr(func, fmt, ...)	\
do {				\
	if ((func))		\
		(func)("libbpf: " fmt, ##__VA_ARGS__); \
} while (0)

#define pr_warning(fmt, ...)	__pr(__pr_warning, fmt, ##__VA_ARGS__)
#define pr_info(fmt, ...)	__pr(__pr_info, fmt, ##__VA_ARGS__)
#define pr_debug(fmt, ...)	__pr(__pr_debug, fmt, ##__VA_ARGS__)
81 
/*
 * Install the callbacks used for all libbpf warning, info and debug
 * output; passing NULL for a level silences it.  Plain global
 * assignment with no locking -- intended to be called once at
 * startup.
 */
void libbpf_set_print(libbpf_print_fn_t warn,
		      libbpf_print_fn_t info,
		      libbpf_print_fn_t debug)
{
	__pr_warning = warn;
	__pr_info = info;
	__pr_debug = debug;
}
90 
/* Size of scratch buffers passed to libbpf_strerror_r() */
#define STRERR_BUFSIZE  128

/* Run @action, capture its result in @err, jump to @out on failure */
#define CHECK_ERR(action, err, out) do {	\
	err = action;			\
	if (err)			\
		goto out;		\
} while(0)


/* Copied from tools/perf/util/util.h */
#ifndef zfree
/* free *ptr and poison it with NULL to guard against use-after-free */
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
#endif

#ifndef zclose
/* close fd only if it is valid (>= 0), then reset it to -1;
 * evaluates to close()'s return value, or 0 if fd was invalid
 */
# define zclose(fd) ({			\
	int ___err = 0;			\
	if ((fd) >= 0)			\
		___err = close((fd));	\
	fd = -1;			\
	___err; })
#endif

/* Prefer mmap-based ELF reading when libelf supports it */
#ifdef HAVE_LIBELF_MMAP_SUPPORT
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ_MMAP
#else
# define LIBBPF_ELF_C_READ_MMAP ELF_C_READ
#endif
119 
struct bpf_capabilities {
	/* v4.14: kernel support for program & map names. */
	__u32 name:1;
};

/*
 * bpf_prog should be a better name but it has been used in
 * linux/filter.h.
 */
struct bpf_program {
	/* Index in elf obj file, for relocation use. */
	int idx;
	char *name;			/* symbol name of the program's function */
	int prog_ifindex;
	char *section_name;
	/* section_name with / replaced by _; makes recursive pinning
	 * in bpf_object__pin_programs easier
	 */
	char *pin_name;
	struct bpf_insn *insns;		/* private copy of the section's insns */
	size_t insns_cnt, main_prog_cnt;
	enum bpf_prog_type type;

	/* one record per relocation applied to this program */
	struct reloc_desc {
		enum {
			RELO_LD64,	/* map reference via BPF_LD_IMM64 */
			RELO_CALL,	/* bpf-to-bpf pseudo call */
		} type;
		int insn_idx;
		union {
			int map_idx;	/* RELO_LD64: index into obj->maps */
			int text_off;	/* RELO_CALL: offset within .text */
		};
	} *reloc_desc;
	int nr_reloc;

	/* kernel fds of loaded instances of this program */
	struct {
		int nr;			/* -1 while not loaded */
		int *fds;
	} instances;
	bpf_program_prep_t preprocessor;

	struct bpf_object *obj;		/* back-pointer to owning object */
	void *priv;
	bpf_program_clear_priv_t clear_priv;

	enum bpf_attach_type expected_attach_type;
	int btf_fd;			/* -1 when no BTF fd is attached */
	void *func_info;		/* .BTF.ext func info for this prog */
	__u32 func_info_rec_size;
	__u32 func_info_cnt;

	struct bpf_capabilities *caps;	/* borrowed pointer to obj->caps */

	void *line_info;		/* .BTF.ext line info for this prog */
	__u32 line_info_rec_size;
	__u32 line_info_cnt;
};

struct bpf_map {
	int fd;				/* -1 until the map is created/reused */
	char *name;
	size_t offset;			/* symbol offset in the "maps" section */
	int map_ifindex;
	int inner_map_fd;		/* for map-in-map types; -1 otherwise */
	struct bpf_map_def def;
	__u32 btf_key_type_id;
	__u32 btf_value_type_id;
	void *priv;
	bpf_map_clear_priv_t clear_priv;
};

/* global registry of every open bpf_object (see obj->list below) */
static LIST_HEAD(bpf_objects_list);

struct bpf_object {
	char license[64];		/* NUL-terminated license string */
	__u32 kern_version;

	struct bpf_program *programs;
	size_t nr_programs;
	struct bpf_map *maps;
	size_t nr_maps;

	bool loaded;
	bool has_pseudo_calls;		/* any RELO_CALL relocation was seen */

	/*
	 * Information when doing elf related work. Only valid if fd
	 * is valid.
	 */
	struct {
		int fd;
		void *obj_buf;		/* borrowed in-memory ELF image */
		size_t obj_buf_sz;
		Elf *elf;
		GElf_Ehdr ehdr;
		Elf_Data *symbols;	/* SHT_SYMTAB section data */
		size_t strtabidx;
		struct {
			GElf_Shdr shdr;
			Elf_Data *data;
		} *reloc;		/* collected SHT_REL sections */
		int nr_reloc;
		int maps_shndx;		/* index of "maps" section, -1 if none */
		int text_shndx;		/* index of ".text" section */
	} efile;
	/*
	 * All loaded bpf_object is linked in a list, which is
	 * hidden to caller. bpf_objects__<func> handlers deal with
	 * all objects.
	 */
	struct list_head list;

	struct btf *btf;		/* parsed .BTF section, may be NULL */
	struct btf_ext *btf_ext;	/* parsed .BTF.ext section, may be NULL */

	void *priv;
	bpf_object_clear_priv_t clear_priv;

	struct bpf_capabilities caps;	/* probed kernel feature support */

	char path[];			/* object file path (flexible array) */
};
/* true while the ELF parsing state in obj->efile is live */
#define obj_elf_valid(o)	((o)->efile.elf)
244 
/*
 * Release all kernel resources held by a loaded program: close every
 * per-instance program fd, the program's BTF fd, and free the
 * func/line info buffers.  Safe to call on NULL or on a program that
 * was never loaded.
 */
void bpf_program__unload(struct bpf_program *prog)
{
	int i;

	if (!prog)
		return;

	/*
	 * If the object is opened but the program was never loaded,
	 * it is possible that prog->instances.nr == -1.
	 */
	if (prog->instances.nr > 0) {
		for (i = 0; i < prog->instances.nr; i++)
			zclose(prog->instances.fds[i]);
	} else if (prog->instances.nr != -1) {
		/* any value other than -1 or a positive count is a bug */
		pr_warning("Internal error: instances.nr is %d\n",
			   prog->instances.nr);
	}

	/* return to the "never loaded" state */
	prog->instances.nr = -1;
	zfree(&prog->instances.fds);

	zclose(prog->btf_fd);
	zfree(&prog->func_info);
	zfree(&prog->line_info);
}
271 
/*
 * Fully tear down a bpf_program: run the user's clear_priv callback,
 * unload kernel resources, and free every heap-allocated field.  The
 * struct itself lives in obj->programs and is not freed here.
 */
static void bpf_program__exit(struct bpf_program *prog)
{
	if (!prog)
		return;

	/* let the owner of priv release it before we drop the pointer */
	if (prog->clear_priv)
		prog->clear_priv(prog, prog->priv);

	prog->priv = NULL;
	prog->clear_priv = NULL;

	bpf_program__unload(prog);
	zfree(&prog->name);
	zfree(&prog->section_name);
	zfree(&prog->pin_name);
	zfree(&prog->insns);
	zfree(&prog->reloc_desc);

	prog->nr_reloc = 0;
	prog->insns_cnt = 0;
	prog->idx = -1;
}
294 
295 static char *__bpf_program__pin_name(struct bpf_program *prog)
296 {
297 	char *name, *p;
298 
299 	name = p = strdup(prog->section_name);
300 	while ((p = strchr(p, '/')))
301 		*p = '_';
302 
303 	return name;
304 }
305 
306 static int
307 bpf_program__init(void *data, size_t size, char *section_name, int idx,
308 		  struct bpf_program *prog)
309 {
310 	if (size < sizeof(struct bpf_insn)) {
311 		pr_warning("corrupted section '%s'\n", section_name);
312 		return -EINVAL;
313 	}
314 
315 	bzero(prog, sizeof(*prog));
316 
317 	prog->section_name = strdup(section_name);
318 	if (!prog->section_name) {
319 		pr_warning("failed to alloc name for prog under section(%d) %s\n",
320 			   idx, section_name);
321 		goto errout;
322 	}
323 
324 	prog->pin_name = __bpf_program__pin_name(prog);
325 	if (!prog->pin_name) {
326 		pr_warning("failed to alloc pin name for prog under section(%d) %s\n",
327 			   idx, section_name);
328 		goto errout;
329 	}
330 
331 	prog->insns = malloc(size);
332 	if (!prog->insns) {
333 		pr_warning("failed to alloc insns for prog under section %s\n",
334 			   section_name);
335 		goto errout;
336 	}
337 	prog->insns_cnt = size / sizeof(struct bpf_insn);
338 	memcpy(prog->insns, data,
339 	       prog->insns_cnt * sizeof(struct bpf_insn));
340 	prog->idx = idx;
341 	prog->instances.fds = NULL;
342 	prog->instances.nr = -1;
343 	prog->type = BPF_PROG_TYPE_UNSPEC;
344 	prog->btf_fd = -1;
345 
346 	return 0;
347 errout:
348 	bpf_program__exit(prog);
349 	return -ENOMEM;
350 }
351 
/*
 * Parse one executable section into a bpf_program and append it to
 * obj->programs (grown with reallocarray()).  On failure the existing
 * obj->programs array is left untouched.
 */
static int
bpf_object__add_program(struct bpf_object *obj, void *data, size_t size,
			char *section_name, int idx)
{
	struct bpf_program prog, *progs;
	int nr_progs, err;

	err = bpf_program__init(data, size, section_name, idx, &prog);
	if (err)
		return err;

	prog.caps = &obj->caps;
	progs = obj->programs;
	nr_progs = obj->nr_programs;

	progs = reallocarray(progs, nr_progs + 1, sizeof(progs[0]));
	if (!progs) {
		/*
		 * In this case the original obj->programs
		 * is still valid, so don't need special treat for
		 * bpf_close_object().
		 */
		pr_warning("failed to alloc a new program under section '%s'\n",
			   section_name);
		bpf_program__exit(&prog);
		return -ENOMEM;
	}

	pr_debug("found program %s\n", prog.section_name);
	obj->programs = progs;
	obj->nr_programs = nr_progs + 1;
	prog.obj = obj;
	/* struct copy: ownership of prog's heap pointers moves to the array */
	progs[nr_progs] = prog;
	return 0;
}
387 
/*
 * For each program, find the STB_GLOBAL symbol that lives in the
 * program's ELF section and use the symbol's name as prog->name.
 * The ".text" section is special-cased: it may have no global symbol
 * and is then simply named ".text".
 *
 * Returns 0 on success, or -LIBBPF_ERRNO__LIBELF / -EINVAL / -ENOMEM
 * on failure.
 */
static int
bpf_object__init_prog_names(struct bpf_object *obj)
{
	Elf_Data *symbols = obj->efile.symbols;
	struct bpf_program *prog;
	size_t pi, si;

	for (pi = 0; pi < obj->nr_programs; pi++) {
		const char *name = NULL;

		prog = &obj->programs[pi];

		/* scan the symtab until a symbol in this prog's section hits */
		for (si = 0; si < symbols->d_size / sizeof(GElf_Sym) && !name;
		     si++) {
			GElf_Sym sym;

			if (!gelf_getsym(symbols, si, &sym))
				continue;
			if (sym.st_shndx != prog->idx)
				continue;
			if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL)
				continue;

			name = elf_strptr(obj->efile.elf,
					  obj->efile.strtabidx,
					  sym.st_name);
			if (!name) {
				pr_warning("failed to get sym name string for prog %s\n",
					   prog->section_name);
				return -LIBBPF_ERRNO__LIBELF;
			}
		}

		if (!name && prog->idx == obj->efile.text_shndx)
			name = ".text";

		if (!name) {
			pr_warning("failed to find sym for prog %s\n",
				   prog->section_name);
			return -EINVAL;
		}

		prog->name = strdup(name);
		if (!prog->name) {
			pr_warning("failed to allocate memory for prog sym %s\n",
				   name);
			return -ENOMEM;
		}
	}

	return 0;
}
440 
/*
 * Allocate and minimally initialize a bpf_object for @path.
 * @obj_buf/@obj_buf_sz may point at an in-memory ELF image, which is
 * borrowed rather than copied (see comment below).  The new object is
 * linked into the global bpf_objects_list.  Returns ERR_PTR(-ENOMEM)
 * on allocation failure.
 */
static struct bpf_object *bpf_object__new(const char *path,
					  void *obj_buf,
					  size_t obj_buf_sz)
{
	struct bpf_object *obj;

	/* path is stored in the flexible array member at the end */
	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
	if (!obj) {
		pr_warning("alloc memory failed for %s\n", path);
		return ERR_PTR(-ENOMEM);
	}

	strcpy(obj->path, path);
	obj->efile.fd = -1;

	/*
	 * Caller of this function should also calls
	 * bpf_object__elf_finish() after data collection to return
	 * obj_buf to user. If not, we should duplicate the buffer to
	 * avoid user freeing them before elf finish.
	 */
	obj->efile.obj_buf = obj_buf;
	obj->efile.obj_buf_sz = obj_buf_sz;
	obj->efile.maps_shndx = -1;	/* no "maps" section seen yet */

	obj->loaded = false;

	INIT_LIST_HEAD(&obj->list);
	list_add(&obj->list, &bpf_objects_list);
	return obj;
}
472 
/*
 * Drop all ELF parsing state: end the libelf handle, close the object
 * file fd and forget the borrowed obj_buf.  Idempotent -- does
 * nothing if the ELF handle was never set up.
 */
static void bpf_object__elf_finish(struct bpf_object *obj)
{
	if (!obj_elf_valid(obj))
		return;

	if (obj->efile.elf) {	/* always true here given the guard above */
		elf_end(obj->efile.elf);
		obj->efile.elf = NULL;
	}
	/* symbols data belongs to libelf and died with elf_end() */
	obj->efile.symbols = NULL;

	zfree(&obj->efile.reloc);
	obj->efile.nr_reloc = 0;
	zclose(obj->efile.fd);
	obj->efile.obj_buf = NULL;
	obj->efile.obj_buf_sz = 0;
}
490 
/*
 * Set up libelf parsing state for the object: open obj->path (or use
 * the caller-provided in-memory buffer), create the Elf handle, read
 * the ELF header and sanity-check that this is a relocatable eBPF
 * object file.  Returns 0 on success or a negative libbpf/errno
 * code; on failure any partially created ELF state is torn down.
 */
static int bpf_object__elf_init(struct bpf_object *obj)
{
	int err = 0;
	GElf_Ehdr *ep;

	if (obj_elf_valid(obj)) {
		pr_warning("elf init: internal error\n");
		return -LIBBPF_ERRNO__LIBELF;
	}

	if (obj->efile.obj_buf_sz > 0) {
		/*
		 * obj_buf should have been validated by
		 * bpf_object__open_buffer().
		 */
		obj->efile.elf = elf_memory(obj->efile.obj_buf,
					    obj->efile.obj_buf_sz);
	} else {
		obj->efile.fd = open(obj->path, O_RDONLY);
		if (obj->efile.fd < 0) {
			char errmsg[STRERR_BUFSIZE];
			char *cp = libbpf_strerror_r(errno, errmsg,
						     sizeof(errmsg));

			pr_warning("failed to open %s: %s\n", obj->path, cp);
			return -errno;
		}

		obj->efile.elf = elf_begin(obj->efile.fd,
				LIBBPF_ELF_C_READ_MMAP,
				NULL);
	}

	if (!obj->efile.elf) {
		pr_warning("failed to open %s as ELF file\n",
				obj->path);
		err = -LIBBPF_ERRNO__LIBELF;
		goto errout;
	}

	if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
		pr_warning("failed to get EHDR from %s\n",
				obj->path);
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}
	ep = &obj->efile.ehdr;

	/* Old LLVM set e_machine to EM_NONE */
	if ((ep->e_type != ET_REL) || (ep->e_machine && (ep->e_machine != EM_BPF))) {
		pr_warning("%s is not an eBPF object file\n",
			obj->path);
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	return 0;
errout:
	bpf_object__elf_finish(obj);
	return err;
}
552 
553 static int
554 bpf_object__check_endianness(struct bpf_object *obj)
555 {
556 	static unsigned int const endian = 1;
557 
558 	switch (obj->efile.ehdr.e_ident[EI_DATA]) {
559 	case ELFDATA2LSB:
560 		/* We are big endian, BPF obj is little endian. */
561 		if (*(unsigned char const *)&endian != 1)
562 			goto mismatch;
563 		break;
564 
565 	case ELFDATA2MSB:
566 		/* We are little endian, BPF obj is big endian. */
567 		if (*(unsigned char const *)&endian != 0)
568 			goto mismatch;
569 		break;
570 	default:
571 		return -LIBBPF_ERRNO__ENDIAN;
572 	}
573 
574 	return 0;
575 
576 mismatch:
577 	pr_warning("Error: endianness mismatch.\n");
578 	return -LIBBPF_ERRNO__ENDIAN;
579 }
580 
/*
 * Record the license string from the "license" ELF section.  At most
 * sizeof(license) - 1 bytes are copied and obj->license was zeroed at
 * allocation time, so the stored string is always NUL-terminated.
 * Always returns 0.
 */
static int
bpf_object__init_license(struct bpf_object *obj,
			 void *data, size_t size)
{
	memcpy(obj->license, data,
	       min(size, sizeof(obj->license) - 1));
	pr_debug("license of %s is %s\n", obj->path, obj->license);
	return 0;
}
590 
591 static int
592 bpf_object__init_kversion(struct bpf_object *obj,
593 			  void *data, size_t size)
594 {
595 	__u32 kver;
596 
597 	if (size != sizeof(kver)) {
598 		pr_warning("invalid kver section in %s\n", obj->path);
599 		return -LIBBPF_ERRNO__FORMAT;
600 	}
601 	memcpy(&kver, data, sizeof(kver));
602 	obj->kern_version = kver;
603 	pr_debug("kernel version of %s is %x\n", obj->path,
604 		 obj->kern_version);
605 	return 0;
606 }
607 
608 static int compare_bpf_map(const void *_a, const void *_b)
609 {
610 	const struct bpf_map *a = _a;
611 	const struct bpf_map *b = _b;
612 
613 	return a->offset - b->offset;
614 }
615 
/* True for map types whose values are themselves map fds. */
static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
{
	return type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
	       type == BPF_MAP_TYPE_HASH_OF_MAPS;
}
623 
/*
 * Parse the "maps" ELF section into obj->maps.
 *
 * Each symbol in the maps section names one map; the section data is
 * assumed to consist of equally sized map definitions, which may be
 * smaller or larger than our struct bpf_map_def (see below).  Maps
 * are sorted by section offset before returning.
 *
 * @flags: MAPS_RELAX_COMPAT tolerates non-zero trailing bytes in
 * oversized map definitions; otherwise such maps are rejected.
 *
 * Returns 0 on success or a negative errno-style code.
 */
static int
bpf_object__init_maps(struct bpf_object *obj, int flags)
{
	bool strict = !(flags & MAPS_RELAX_COMPAT);
	int i, map_idx, map_def_sz, nr_maps = 0;
	Elf_Scn *scn;
	Elf_Data *data;
	Elf_Data *symbols = obj->efile.symbols;

	if (obj->efile.maps_shndx < 0)
		return -EINVAL;
	if (!symbols)
		return -EINVAL;

	scn = elf_getscn(obj->efile.elf, obj->efile.maps_shndx);
	if (scn)
		data = elf_getdata(scn, NULL);
	if (!scn || !data) {
		pr_warning("failed to get Elf_Data from map section %d\n",
			   obj->efile.maps_shndx);
		return -EINVAL;
	}

	/*
	 * Count number of maps. Each map has a name.
	 * Array of maps is not supported: only the first element is
	 * considered.
	 *
	 * TODO: Detect array of map and report error.
	 */
	for (i = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
		GElf_Sym sym;

		if (!gelf_getsym(symbols, i, &sym))
			continue;
		if (sym.st_shndx != obj->efile.maps_shndx)
			continue;
		nr_maps++;
	}

	/* Alloc obj->maps and fill nr_maps. */
	pr_debug("maps in %s: %d maps in %zd bytes\n", obj->path,
		 nr_maps, data->d_size);

	if (!nr_maps)
		return 0;

	/* Assume equally sized map definitions */
	map_def_sz = data->d_size / nr_maps;
	if (!data->d_size || (data->d_size % nr_maps) != 0) {
		pr_warning("unable to determine map definition size "
			   "section %s, %d maps in %zd bytes\n",
			   obj->path, nr_maps, data->d_size);
		return -EINVAL;
	}

	obj->maps = calloc(nr_maps, sizeof(obj->maps[0]));
	if (!obj->maps) {
		pr_warning("alloc maps for object failed\n");
		return -ENOMEM;
	}
	obj->nr_maps = nr_maps;

	for (i = 0; i < nr_maps; i++) {
		/*
		 * fill all fd with -1 so won't close incorrect
		 * fd (fd=0 is stdin) when failure (zclose won't close
		 * negative fd)).
		 */
		obj->maps[i].fd = -1;
		obj->maps[i].inner_map_fd = -1;
	}

	/*
	 * Fill obj->maps using data in "maps" section.
	 */
	for (i = 0, map_idx = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
		GElf_Sym sym;
		const char *map_name;
		struct bpf_map_def *def;

		if (!gelf_getsym(symbols, i, &sym))
			continue;
		if (sym.st_shndx != obj->efile.maps_shndx)
			continue;

		/* NOTE(review): elf_strptr() can return NULL on a corrupt
		 * strtab; map_name is later passed to strdup() without a
		 * NULL check -- confirm the input object is trusted.
		 */
		map_name = elf_strptr(obj->efile.elf,
				      obj->efile.strtabidx,
				      sym.st_name);
		obj->maps[map_idx].offset = sym.st_value;
		if (sym.st_value + map_def_sz > data->d_size) {
			pr_warning("corrupted maps section in %s: last map \"%s\" too small\n",
				   obj->path, map_name);
			return -EINVAL;
		}

		obj->maps[map_idx].name = strdup(map_name);
		if (!obj->maps[map_idx].name) {
			pr_warning("failed to alloc map name\n");
			return -ENOMEM;
		}
		pr_debug("map %d is \"%s\"\n", map_idx,
			 obj->maps[map_idx].name);
		def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
		/*
		 * If the definition of the map in the object file fits in
		 * bpf_map_def, copy it.  Any extra fields in our version
		 * of bpf_map_def will default to zero as a result of the
		 * calloc above.
		 */
		if (map_def_sz <= sizeof(struct bpf_map_def)) {
			memcpy(&obj->maps[map_idx].def, def, map_def_sz);
		} else {
			/*
			 * Here the map structure being read is bigger than what
			 * we expect, truncate if the excess bits are all zero.
			 * If they are not zero, reject this map as
			 * incompatible.
			 */
			char *b;
			for (b = ((char *)def) + sizeof(struct bpf_map_def);
			     b < ((char *)def) + map_def_sz; b++) {
				if (*b != 0) {
					pr_warning("maps section in %s: \"%s\" "
						   "has unrecognized, non-zero "
						   "options\n",
						   obj->path, map_name);
					if (strict)
						return -EINVAL;
				}
			}
			memcpy(&obj->maps[map_idx].def, def,
			       sizeof(struct bpf_map_def));
		}
		map_idx++;
	}

	/* keep maps ordered by their offset within the section */
	qsort(obj->maps, obj->nr_maps, sizeof(obj->maps[0]), compare_bpf_map);
	return 0;
}
764 
765 static bool section_have_execinstr(struct bpf_object *obj, int idx)
766 {
767 	Elf_Scn *scn;
768 	GElf_Shdr sh;
769 
770 	scn = elf_getscn(obj->efile.elf, idx);
771 	if (!scn)
772 		return false;
773 
774 	if (gelf_getshdr(scn, &sh) != &sh)
775 		return false;
776 
777 	if (sh.sh_flags & SHF_EXECINSTR)
778 		return true;
779 
780 	return false;
781 }
782 
783 static int bpf_object__elf_collect(struct bpf_object *obj, int flags)
784 {
785 	Elf *elf = obj->efile.elf;
786 	GElf_Ehdr *ep = &obj->efile.ehdr;
787 	Elf_Data *btf_ext_data = NULL;
788 	Elf_Scn *scn = NULL;
789 	int idx = 0, err = 0;
790 
791 	/* Elf is corrupted/truncated, avoid calling elf_strptr. */
792 	if (!elf_rawdata(elf_getscn(elf, ep->e_shstrndx), NULL)) {
793 		pr_warning("failed to get e_shstrndx from %s\n",
794 			   obj->path);
795 		return -LIBBPF_ERRNO__FORMAT;
796 	}
797 
798 	while ((scn = elf_nextscn(elf, scn)) != NULL) {
799 		char *name;
800 		GElf_Shdr sh;
801 		Elf_Data *data;
802 
803 		idx++;
804 		if (gelf_getshdr(scn, &sh) != &sh) {
805 			pr_warning("failed to get section(%d) header from %s\n",
806 				   idx, obj->path);
807 			err = -LIBBPF_ERRNO__FORMAT;
808 			goto out;
809 		}
810 
811 		name = elf_strptr(elf, ep->e_shstrndx, sh.sh_name);
812 		if (!name) {
813 			pr_warning("failed to get section(%d) name from %s\n",
814 				   idx, obj->path);
815 			err = -LIBBPF_ERRNO__FORMAT;
816 			goto out;
817 		}
818 
819 		data = elf_getdata(scn, 0);
820 		if (!data) {
821 			pr_warning("failed to get section(%d) data from %s(%s)\n",
822 				   idx, name, obj->path);
823 			err = -LIBBPF_ERRNO__FORMAT;
824 			goto out;
825 		}
826 		pr_debug("section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
827 			 idx, name, (unsigned long)data->d_size,
828 			 (int)sh.sh_link, (unsigned long)sh.sh_flags,
829 			 (int)sh.sh_type);
830 
831 		if (strcmp(name, "license") == 0)
832 			err = bpf_object__init_license(obj,
833 						       data->d_buf,
834 						       data->d_size);
835 		else if (strcmp(name, "version") == 0)
836 			err = bpf_object__init_kversion(obj,
837 							data->d_buf,
838 							data->d_size);
839 		else if (strcmp(name, "maps") == 0)
840 			obj->efile.maps_shndx = idx;
841 		else if (strcmp(name, BTF_ELF_SEC) == 0) {
842 			obj->btf = btf__new(data->d_buf, data->d_size,
843 					    __pr_debug);
844 			if (IS_ERR(obj->btf)) {
845 				pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
846 					   BTF_ELF_SEC, PTR_ERR(obj->btf));
847 				obj->btf = NULL;
848 			}
849 		} else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
850 			btf_ext_data = data;
851 		} else if (sh.sh_type == SHT_SYMTAB) {
852 			if (obj->efile.symbols) {
853 				pr_warning("bpf: multiple SYMTAB in %s\n",
854 					   obj->path);
855 				err = -LIBBPF_ERRNO__FORMAT;
856 			} else {
857 				obj->efile.symbols = data;
858 				obj->efile.strtabidx = sh.sh_link;
859 			}
860 		} else if ((sh.sh_type == SHT_PROGBITS) &&
861 			   (sh.sh_flags & SHF_EXECINSTR) &&
862 			   (data->d_size > 0)) {
863 			if (strcmp(name, ".text") == 0)
864 				obj->efile.text_shndx = idx;
865 			err = bpf_object__add_program(obj, data->d_buf,
866 						      data->d_size, name, idx);
867 			if (err) {
868 				char errmsg[STRERR_BUFSIZE];
869 				char *cp = libbpf_strerror_r(-err, errmsg,
870 							     sizeof(errmsg));
871 
872 				pr_warning("failed to alloc program %s (%s): %s",
873 					   name, obj->path, cp);
874 			}
875 		} else if (sh.sh_type == SHT_REL) {
876 			void *reloc = obj->efile.reloc;
877 			int nr_reloc = obj->efile.nr_reloc + 1;
878 			int sec = sh.sh_info; /* points to other section */
879 
880 			/* Only do relo for section with exec instructions */
881 			if (!section_have_execinstr(obj, sec)) {
882 				pr_debug("skip relo %s(%d) for section(%d)\n",
883 					 name, idx, sec);
884 				continue;
885 			}
886 
887 			reloc = reallocarray(reloc, nr_reloc,
888 					     sizeof(*obj->efile.reloc));
889 			if (!reloc) {
890 				pr_warning("realloc failed\n");
891 				err = -ENOMEM;
892 			} else {
893 				int n = nr_reloc - 1;
894 
895 				obj->efile.reloc = reloc;
896 				obj->efile.nr_reloc = nr_reloc;
897 
898 				obj->efile.reloc[n].shdr = sh;
899 				obj->efile.reloc[n].data = data;
900 			}
901 		} else {
902 			pr_debug("skip section(%d) %s\n", idx, name);
903 		}
904 		if (err)
905 			goto out;
906 	}
907 
908 	if (!obj->efile.strtabidx || obj->efile.strtabidx >= idx) {
909 		pr_warning("Corrupted ELF file: index of strtab invalid\n");
910 		return LIBBPF_ERRNO__FORMAT;
911 	}
912 	if (btf_ext_data) {
913 		if (!obj->btf) {
914 			pr_debug("Ignore ELF section %s because its depending ELF section %s is not found.\n",
915 				 BTF_EXT_ELF_SEC, BTF_ELF_SEC);
916 		} else {
917 			obj->btf_ext = btf_ext__new(btf_ext_data->d_buf,
918 						    btf_ext_data->d_size,
919 						    __pr_debug);
920 			if (IS_ERR(obj->btf_ext)) {
921 				pr_warning("Error loading ELF section %s: %ld. Ignored and continue.\n",
922 					   BTF_EXT_ELF_SEC,
923 					   PTR_ERR(obj->btf_ext));
924 				obj->btf_ext = NULL;
925 			}
926 		}
927 	}
928 	if (obj->efile.maps_shndx >= 0) {
929 		err = bpf_object__init_maps(obj, flags);
930 		if (err)
931 			goto out;
932 	}
933 	err = bpf_object__init_prog_names(obj);
934 out:
935 	return err;
936 }
937 
938 static struct bpf_program *
939 bpf_object__find_prog_by_idx(struct bpf_object *obj, int idx)
940 {
941 	struct bpf_program *prog;
942 	size_t i;
943 
944 	for (i = 0; i < obj->nr_programs; i++) {
945 		prog = &obj->programs[i];
946 		if (prog->idx == idx)
947 			return prog;
948 	}
949 	return NULL;
950 }
951 
/*
 * Find the program whose ELF section name matches @title exactly, or
 * NULL if @obj contains no such program.
 */
struct bpf_program *
bpf_object__find_program_by_title(struct bpf_object *obj, const char *title)
{
	struct bpf_program *pos;

	bpf_object__for_each_program(pos, obj) {
		if (pos->section_name && !strcmp(pos->section_name, title))
			return pos;
	}
	return NULL;
}
963 
/*
 * Build prog->reloc_desc[] from one SHT_REL section.
 *
 * Each relocation either references a map symbol in the "maps"
 * section (RELO_LD64, resolved to an index in obj->maps by matching
 * the symbol's section offset) or a function in ".text" (RELO_CALL,
 * for bpf-to-bpf calls).  Returns 0 on success or a negative
 * LIBBPF_ERRNO__* / errno code.
 */
static int
bpf_program__collect_reloc(struct bpf_program *prog, GElf_Shdr *shdr,
			   Elf_Data *data, struct bpf_object *obj)
{
	Elf_Data *symbols = obj->efile.symbols;
	int text_shndx = obj->efile.text_shndx;
	int maps_shndx = obj->efile.maps_shndx;
	struct bpf_map *maps = obj->maps;
	size_t nr_maps = obj->nr_maps;
	int i, nrels;

	pr_debug("collecting relocating info for: '%s'\n",
		 prog->section_name);
	nrels = shdr->sh_size / shdr->sh_entsize;

	prog->reloc_desc = malloc(sizeof(*prog->reloc_desc) * nrels);
	if (!prog->reloc_desc) {
		pr_warning("failed to alloc memory in relocation\n");
		return -ENOMEM;
	}
	prog->nr_reloc = nrels;

	for (i = 0; i < nrels; i++) {
		GElf_Sym sym;
		GElf_Rel rel;
		unsigned int insn_idx;
		struct bpf_insn *insns = prog->insns;
		size_t map_idx;

		if (!gelf_getrel(data, i, &rel)) {
			pr_warning("relocation: failed to get %d reloc\n", i);
			return -LIBBPF_ERRNO__FORMAT;
		}

		if (!gelf_getsym(symbols,
				 GELF_R_SYM(rel.r_info),
				 &sym)) {
			pr_warning("relocation: symbol %"PRIx64" not found\n",
				   GELF_R_SYM(rel.r_info));
			return -LIBBPF_ERRNO__FORMAT;
		}
		pr_debug("relo for %lld value %lld name %d\n",
			 (long long) (rel.r_info >> 32),
			 (long long) sym.st_value, sym.st_name);

		/* only map symbols and .text symbols may be relocated against */
		if (sym.st_shndx != maps_shndx && sym.st_shndx != text_shndx) {
			pr_warning("Program '%s' contains non-map related relo data pointing to section %u\n",
				   prog->section_name, sym.st_shndx);
			return -LIBBPF_ERRNO__RELOC;
		}

		/* NOTE(review): insn_idx is derived from rel.r_offset and
		 * used to index prog->insns without a bounds check against
		 * prog->insns_cnt -- confirm input objects are trusted.
		 */
		insn_idx = rel.r_offset / sizeof(struct bpf_insn);
		pr_debug("relocation: insn_idx=%u\n", insn_idx);

		if (insns[insn_idx].code == (BPF_JMP | BPF_CALL)) {
			if (insns[insn_idx].src_reg != BPF_PSEUDO_CALL) {
				pr_warning("incorrect bpf_call opcode\n");
				return -LIBBPF_ERRNO__RELOC;
			}
			prog->reloc_desc[i].type = RELO_CALL;
			prog->reloc_desc[i].insn_idx = insn_idx;
			prog->reloc_desc[i].text_off = sym.st_value;
			obj->has_pseudo_calls = true;
			continue;
		}

		/* a map reference must sit in a BPF_LD_IMM64 instruction */
		if (insns[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
			pr_warning("bpf: relocation: invalid relo for insns[%d].code 0x%x\n",
				   insn_idx, insns[insn_idx].code);
			return -LIBBPF_ERRNO__RELOC;
		}

		/* TODO: 'maps' is sorted. We can use bsearch to make it faster. */
		for (map_idx = 0; map_idx < nr_maps; map_idx++) {
			if (maps[map_idx].offset == sym.st_value) {
				pr_debug("relocation: find map %zd (%s) for insn %u\n",
					 map_idx, maps[map_idx].name, insn_idx);
				break;
			}
		}

		if (map_idx >= nr_maps) {
			pr_warning("bpf relocation: map_idx %d large than %d\n",
				   (int)map_idx, (int)nr_maps - 1);
			return -LIBBPF_ERRNO__RELOC;
		}

		prog->reloc_desc[i].type = RELO_LD64;
		prog->reloc_desc[i].insn_idx = insn_idx;
		prog->reloc_desc[i].map_idx = map_idx;
	}
	return 0;
}
1057 
1058 static int bpf_map_find_btf_info(struct bpf_map *map, const struct btf *btf)
1059 {
1060 	const struct btf_type *container_type;
1061 	const struct btf_member *key, *value;
1062 	struct bpf_map_def *def = &map->def;
1063 	const size_t max_name = 256;
1064 	char container_name[max_name];
1065 	__s64 key_size, value_size;
1066 	__s32 container_id;
1067 
1068 	if (snprintf(container_name, max_name, "____btf_map_%s", map->name) ==
1069 	    max_name) {
1070 		pr_warning("map:%s length of '____btf_map_%s' is too long\n",
1071 			   map->name, map->name);
1072 		return -EINVAL;
1073 	}
1074 
1075 	container_id = btf__find_by_name(btf, container_name);
1076 	if (container_id < 0) {
1077 		pr_debug("map:%s container_name:%s cannot be found in BTF. Missing BPF_ANNOTATE_KV_PAIR?\n",
1078 			 map->name, container_name);
1079 		return container_id;
1080 	}
1081 
1082 	container_type = btf__type_by_id(btf, container_id);
1083 	if (!container_type) {
1084 		pr_warning("map:%s cannot find BTF type for container_id:%u\n",
1085 			   map->name, container_id);
1086 		return -EINVAL;
1087 	}
1088 
1089 	if (BTF_INFO_KIND(container_type->info) != BTF_KIND_STRUCT ||
1090 	    BTF_INFO_VLEN(container_type->info) < 2) {
1091 		pr_warning("map:%s container_name:%s is an invalid container struct\n",
1092 			   map->name, container_name);
1093 		return -EINVAL;
1094 	}
1095 
1096 	key = (struct btf_member *)(container_type + 1);
1097 	value = key + 1;
1098 
1099 	key_size = btf__resolve_size(btf, key->type);
1100 	if (key_size < 0) {
1101 		pr_warning("map:%s invalid BTF key_type_size\n",
1102 			   map->name);
1103 		return key_size;
1104 	}
1105 
1106 	if (def->key_size != key_size) {
1107 		pr_warning("map:%s btf_key_type_size:%u != map_def_key_size:%u\n",
1108 			   map->name, (__u32)key_size, def->key_size);
1109 		return -EINVAL;
1110 	}
1111 
1112 	value_size = btf__resolve_size(btf, value->type);
1113 	if (value_size < 0) {
1114 		pr_warning("map:%s invalid BTF value_type_size\n", map->name);
1115 		return value_size;
1116 	}
1117 
1118 	if (def->value_size != value_size) {
1119 		pr_warning("map:%s btf_value_type_size:%u != map_def_value_size:%u\n",
1120 			   map->name, (__u32)value_size, def->value_size);
1121 		return -EINVAL;
1122 	}
1123 
1124 	map->btf_key_type_id = key->type;
1125 	map->btf_value_type_id = value->type;
1126 
1127 	return 0;
1128 }
1129 
1130 int bpf_map__reuse_fd(struct bpf_map *map, int fd)
1131 {
1132 	struct bpf_map_info info = {};
1133 	__u32 len = sizeof(info);
1134 	int new_fd, err;
1135 	char *new_name;
1136 
1137 	err = bpf_obj_get_info_by_fd(fd, &info, &len);
1138 	if (err)
1139 		return err;
1140 
1141 	new_name = strdup(info.name);
1142 	if (!new_name)
1143 		return -errno;
1144 
1145 	new_fd = open("/", O_RDONLY | O_CLOEXEC);
1146 	if (new_fd < 0)
1147 		goto err_free_new_name;
1148 
1149 	new_fd = dup3(fd, new_fd, O_CLOEXEC);
1150 	if (new_fd < 0)
1151 		goto err_close_new_fd;
1152 
1153 	err = zclose(map->fd);
1154 	if (err)
1155 		goto err_close_new_fd;
1156 	free(map->name);
1157 
1158 	map->fd = new_fd;
1159 	map->name = new_name;
1160 	map->def.type = info.type;
1161 	map->def.key_size = info.key_size;
1162 	map->def.value_size = info.value_size;
1163 	map->def.max_entries = info.max_entries;
1164 	map->def.map_flags = info.map_flags;
1165 	map->btf_key_type_id = info.btf_key_type_id;
1166 	map->btf_value_type_id = info.btf_value_type_id;
1167 
1168 	return 0;
1169 
1170 err_close_new_fd:
1171 	close(new_fd);
1172 err_free_new_name:
1173 	free(new_name);
1174 	return -errno;
1175 }
1176 
1177 static int
1178 bpf_object__probe_name(struct bpf_object *obj)
1179 {
1180 	struct bpf_load_program_attr attr;
1181 	char *cp, errmsg[STRERR_BUFSIZE];
1182 	struct bpf_insn insns[] = {
1183 		BPF_MOV64_IMM(BPF_REG_0, 0),
1184 		BPF_EXIT_INSN(),
1185 	};
1186 	int ret;
1187 
1188 	/* make sure basic loading works */
1189 
1190 	memset(&attr, 0, sizeof(attr));
1191 	attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
1192 	attr.insns = insns;
1193 	attr.insns_cnt = ARRAY_SIZE(insns);
1194 	attr.license = "GPL";
1195 
1196 	ret = bpf_load_program_xattr(&attr, NULL, 0);
1197 	if (ret < 0) {
1198 		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1199 		pr_warning("Error in %s():%s(%d). Couldn't load basic 'r0 = 0' BPF program.\n",
1200 			   __func__, cp, errno);
1201 		return -errno;
1202 	}
1203 	close(ret);
1204 
1205 	/* now try the same program, but with the name */
1206 
1207 	attr.name = "test";
1208 	ret = bpf_load_program_xattr(&attr, NULL, 0);
1209 	if (ret >= 0) {
1210 		obj->caps.name = 1;
1211 		close(ret);
1212 	}
1213 
1214 	return 0;
1215 }
1216 
/*
 * Probe the running kernel for optional features the loader can use.
 * Currently only the "program/map name" capability is probed; the result
 * is recorded in obj->caps by the callee.
 */
static int
bpf_object__probe_caps(struct bpf_object *obj)
{
	return bpf_object__probe_name(obj);
}
1222 
1223 static int
1224 bpf_object__create_maps(struct bpf_object *obj)
1225 {
1226 	struct bpf_create_map_attr create_attr = {};
1227 	unsigned int i;
1228 	int err;
1229 
1230 	for (i = 0; i < obj->nr_maps; i++) {
1231 		struct bpf_map *map = &obj->maps[i];
1232 		struct bpf_map_def *def = &map->def;
1233 		char *cp, errmsg[STRERR_BUFSIZE];
1234 		int *pfd = &map->fd;
1235 
1236 		if (map->fd >= 0) {
1237 			pr_debug("skip map create (preset) %s: fd=%d\n",
1238 				 map->name, map->fd);
1239 			continue;
1240 		}
1241 
1242 		if (obj->caps.name)
1243 			create_attr.name = map->name;
1244 		create_attr.map_ifindex = map->map_ifindex;
1245 		create_attr.map_type = def->type;
1246 		create_attr.map_flags = def->map_flags;
1247 		create_attr.key_size = def->key_size;
1248 		create_attr.value_size = def->value_size;
1249 		create_attr.max_entries = def->max_entries;
1250 		create_attr.btf_fd = 0;
1251 		create_attr.btf_key_type_id = 0;
1252 		create_attr.btf_value_type_id = 0;
1253 		if (bpf_map_type__is_map_in_map(def->type) &&
1254 		    map->inner_map_fd >= 0)
1255 			create_attr.inner_map_fd = map->inner_map_fd;
1256 
1257 		if (obj->btf && !bpf_map_find_btf_info(map, obj->btf)) {
1258 			create_attr.btf_fd = btf__fd(obj->btf);
1259 			create_attr.btf_key_type_id = map->btf_key_type_id;
1260 			create_attr.btf_value_type_id = map->btf_value_type_id;
1261 		}
1262 
1263 		*pfd = bpf_create_map_xattr(&create_attr);
1264 		if (*pfd < 0 && create_attr.btf_key_type_id) {
1265 			cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1266 			pr_warning("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
1267 				   map->name, cp, errno);
1268 			create_attr.btf_fd = 0;
1269 			create_attr.btf_key_type_id = 0;
1270 			create_attr.btf_value_type_id = 0;
1271 			map->btf_key_type_id = 0;
1272 			map->btf_value_type_id = 0;
1273 			*pfd = bpf_create_map_xattr(&create_attr);
1274 		}
1275 
1276 		if (*pfd < 0) {
1277 			size_t j;
1278 
1279 			err = *pfd;
1280 			cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1281 			pr_warning("failed to create map (name: '%s'): %s\n",
1282 				   map->name, cp);
1283 			for (j = 0; j < i; j++)
1284 				zclose(obj->maps[j].fd);
1285 			return err;
1286 		}
1287 		pr_debug("create map %s: fd=%d\n", map->name, *pfd);
1288 	}
1289 
1290 	return 0;
1291 }
1292 
1293 static int
1294 check_btf_ext_reloc_err(struct bpf_program *prog, int err,
1295 			void *btf_prog_info, const char *info_name)
1296 {
1297 	if (err != -ENOENT) {
1298 		pr_warning("Error in loading %s for sec %s.\n",
1299 			   info_name, prog->section_name);
1300 		return err;
1301 	}
1302 
1303 	/* err == -ENOENT (i.e. prog->section_name not found in btf_ext) */
1304 
1305 	if (btf_prog_info) {
1306 		/*
1307 		 * Some info has already been found but has problem
1308 		 * in the last btf_ext reloc.  Must have to error
1309 		 * out.
1310 		 */
1311 		pr_warning("Error in relocating %s for sec %s.\n",
1312 			   info_name, prog->section_name);
1313 		return err;
1314 	}
1315 
1316 	/*
1317 	 * Have problem loading the very first info.  Ignore
1318 	 * the rest.
1319 	 */
1320 	pr_warning("Cannot find %s for main program sec %s. Ignore all %s.\n",
1321 		   info_name, prog->section_name, info_name);
1322 	return 0;
1323 }
1324 
1325 static int
1326 bpf_program_reloc_btf_ext(struct bpf_program *prog, struct bpf_object *obj,
1327 			  const char *section_name,  __u32 insn_offset)
1328 {
1329 	int err;
1330 
1331 	if (!insn_offset || prog->func_info) {
1332 		/*
1333 		 * !insn_offset => main program
1334 		 *
1335 		 * For sub prog, the main program's func_info has to
1336 		 * be loaded first (i.e. prog->func_info != NULL)
1337 		 */
1338 		err = btf_ext__reloc_func_info(obj->btf, obj->btf_ext,
1339 					       section_name, insn_offset,
1340 					       &prog->func_info,
1341 					       &prog->func_info_cnt);
1342 		if (err)
1343 			return check_btf_ext_reloc_err(prog, err,
1344 						       prog->func_info,
1345 						       "bpf_func_info");
1346 
1347 		prog->func_info_rec_size = btf_ext__func_info_rec_size(obj->btf_ext);
1348 	}
1349 
1350 	if (!insn_offset || prog->line_info) {
1351 		err = btf_ext__reloc_line_info(obj->btf, obj->btf_ext,
1352 					       section_name, insn_offset,
1353 					       &prog->line_info,
1354 					       &prog->line_info_cnt);
1355 		if (err)
1356 			return check_btf_ext_reloc_err(prog, err,
1357 						       prog->line_info,
1358 						       "bpf_line_info");
1359 
1360 		prog->line_info_rec_size = btf_ext__line_info_rec_size(obj->btf_ext);
1361 	}
1362 
1363 	if (!insn_offset)
1364 		prog->btf_fd = btf__fd(obj->btf);
1365 
1366 	return 0;
1367 }
1368 
1369 static int
1370 bpf_program__reloc_text(struct bpf_program *prog, struct bpf_object *obj,
1371 			struct reloc_desc *relo)
1372 {
1373 	struct bpf_insn *insn, *new_insn;
1374 	struct bpf_program *text;
1375 	size_t new_cnt;
1376 	int err;
1377 
1378 	if (relo->type != RELO_CALL)
1379 		return -LIBBPF_ERRNO__RELOC;
1380 
1381 	if (prog->idx == obj->efile.text_shndx) {
1382 		pr_warning("relo in .text insn %d into off %d\n",
1383 			   relo->insn_idx, relo->text_off);
1384 		return -LIBBPF_ERRNO__RELOC;
1385 	}
1386 
1387 	if (prog->main_prog_cnt == 0) {
1388 		text = bpf_object__find_prog_by_idx(obj, obj->efile.text_shndx);
1389 		if (!text) {
1390 			pr_warning("no .text section found yet relo into text exist\n");
1391 			return -LIBBPF_ERRNO__RELOC;
1392 		}
1393 		new_cnt = prog->insns_cnt + text->insns_cnt;
1394 		new_insn = reallocarray(prog->insns, new_cnt, sizeof(*insn));
1395 		if (!new_insn) {
1396 			pr_warning("oom in prog realloc\n");
1397 			return -ENOMEM;
1398 		}
1399 
1400 		if (obj->btf_ext) {
1401 			err = bpf_program_reloc_btf_ext(prog, obj,
1402 							text->section_name,
1403 							prog->insns_cnt);
1404 			if (err)
1405 				return err;
1406 		}
1407 
1408 		memcpy(new_insn + prog->insns_cnt, text->insns,
1409 		       text->insns_cnt * sizeof(*insn));
1410 		prog->insns = new_insn;
1411 		prog->main_prog_cnt = prog->insns_cnt;
1412 		prog->insns_cnt = new_cnt;
1413 		pr_debug("added %zd insn from %s to prog %s\n",
1414 			 text->insns_cnt, text->section_name,
1415 			 prog->section_name);
1416 	}
1417 	insn = &prog->insns[relo->insn_idx];
1418 	insn->imm += prog->main_prog_cnt - relo->insn_idx;
1419 	return 0;
1420 }
1421 
1422 static int
1423 bpf_program__relocate(struct bpf_program *prog, struct bpf_object *obj)
1424 {
1425 	int i, err;
1426 
1427 	if (!prog)
1428 		return 0;
1429 
1430 	if (obj->btf_ext) {
1431 		err = bpf_program_reloc_btf_ext(prog, obj,
1432 						prog->section_name, 0);
1433 		if (err)
1434 			return err;
1435 	}
1436 
1437 	if (!prog->reloc_desc)
1438 		return 0;
1439 
1440 	for (i = 0; i < prog->nr_reloc; i++) {
1441 		if (prog->reloc_desc[i].type == RELO_LD64) {
1442 			struct bpf_insn *insns = prog->insns;
1443 			int insn_idx, map_idx;
1444 
1445 			insn_idx = prog->reloc_desc[i].insn_idx;
1446 			map_idx = prog->reloc_desc[i].map_idx;
1447 
1448 			if (insn_idx >= (int)prog->insns_cnt) {
1449 				pr_warning("relocation out of range: '%s'\n",
1450 					   prog->section_name);
1451 				return -LIBBPF_ERRNO__RELOC;
1452 			}
1453 			insns[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;
1454 			insns[insn_idx].imm = obj->maps[map_idx].fd;
1455 		} else {
1456 			err = bpf_program__reloc_text(prog, obj,
1457 						      &prog->reloc_desc[i]);
1458 			if (err)
1459 				return err;
1460 		}
1461 	}
1462 
1463 	zfree(&prog->reloc_desc);
1464 	prog->nr_reloc = 0;
1465 	return 0;
1466 }
1467 
1468 
1469 static int
1470 bpf_object__relocate(struct bpf_object *obj)
1471 {
1472 	struct bpf_program *prog;
1473 	size_t i;
1474 	int err;
1475 
1476 	for (i = 0; i < obj->nr_programs; i++) {
1477 		prog = &obj->programs[i];
1478 
1479 		err = bpf_program__relocate(prog, obj);
1480 		if (err) {
1481 			pr_warning("failed to relocate '%s'\n",
1482 				   prog->section_name);
1483 			return err;
1484 		}
1485 	}
1486 	return 0;
1487 }
1488 
1489 static int bpf_object__collect_reloc(struct bpf_object *obj)
1490 {
1491 	int i, err;
1492 
1493 	if (!obj_elf_valid(obj)) {
1494 		pr_warning("Internal error: elf object is closed\n");
1495 		return -LIBBPF_ERRNO__INTERNAL;
1496 	}
1497 
1498 	for (i = 0; i < obj->efile.nr_reloc; i++) {
1499 		GElf_Shdr *shdr = &obj->efile.reloc[i].shdr;
1500 		Elf_Data *data = obj->efile.reloc[i].data;
1501 		int idx = shdr->sh_info;
1502 		struct bpf_program *prog;
1503 
1504 		if (shdr->sh_type != SHT_REL) {
1505 			pr_warning("internal error at %d\n", __LINE__);
1506 			return -LIBBPF_ERRNO__INTERNAL;
1507 		}
1508 
1509 		prog = bpf_object__find_prog_by_idx(obj, idx);
1510 		if (!prog) {
1511 			pr_warning("relocation failed: no section(%d)\n", idx);
1512 			return -LIBBPF_ERRNO__RELOC;
1513 		}
1514 
1515 		err = bpf_program__collect_reloc(prog,
1516 						 shdr, data,
1517 						 obj);
1518 		if (err)
1519 			return err;
1520 	}
1521 	return 0;
1522 }
1523 
/*
 * Load one instruction buffer of a program into the kernel.
 * On success, stores the new program fd in *pfd and returns 0.  On
 * failure, returns a -LIBBPF_ERRNO__* code chosen by a set of heuristics:
 * verifier log output => VERIFY; too many insns => PROG2BIG; program loads
 * fine as a kprobe => PROGTYPE (wrong type); otherwise KVER.
 */
static int
load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
	     char *license, __u32 kern_version, int *pfd)
{
	struct bpf_load_program_attr load_attr;
	char *cp, errmsg[STRERR_BUFSIZE];
	char *log_buf;
	int ret;

	memset(&load_attr, 0, sizeof(struct bpf_load_program_attr));
	load_attr.prog_type = prog->type;
	load_attr.expected_attach_type = prog->expected_attach_type;
	if (prog->caps->name)
		load_attr.name = prog->name;
	load_attr.insns = insns;
	load_attr.insns_cnt = insns_cnt;
	load_attr.license = license;
	load_attr.kern_version = kern_version;
	load_attr.prog_ifindex = prog->prog_ifindex;
	/* a btf_fd of 0 tells the kernel "no BTF attached" */
	load_attr.prog_btf_fd = prog->btf_fd >= 0 ? prog->btf_fd : 0;
	load_attr.func_info = prog->func_info;
	load_attr.func_info_rec_size = prog->func_info_rec_size;
	load_attr.func_info_cnt = prog->func_info_cnt;
	load_attr.line_info = prog->line_info;
	load_attr.line_info_rec_size = prog->line_info_rec_size;
	load_attr.line_info_cnt = prog->line_info_cnt;
	if (!load_attr.insns || !load_attr.insns_cnt)
		return -EINVAL;

	/* failing to allocate the verifier log buffer is not fatal */
	log_buf = malloc(BPF_LOG_BUF_SIZE);
	if (!log_buf)
		pr_warning("Alloc log buffer for bpf loader error, continue without log\n");

	ret = bpf_load_program_xattr(&load_attr, log_buf, BPF_LOG_BUF_SIZE);

	if (ret >= 0) {
		*pfd = ret;
		ret = 0;
		goto out;
	}

	ret = -LIBBPF_ERRNO__LOAD;
	cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
	pr_warning("load bpf program failed: %s\n", cp);

	if (log_buf && log_buf[0] != '\0') {
		/* verifier produced output: classify as a verify failure */
		ret = -LIBBPF_ERRNO__VERIFY;
		pr_warning("-- BEGIN DUMP LOG ---\n");
		pr_warning("\n%s\n", log_buf);
		pr_warning("-- END LOG --\n");
	} else if (load_attr.insns_cnt >= BPF_MAXINSNS) {
		pr_warning("Program too large (%zu insns), at most %d insns\n",
			   load_attr.insns_cnt, BPF_MAXINSNS);
		ret = -LIBBPF_ERRNO__PROG2BIG;
	} else {
		/* Wrong program type?  Probe by retrying as a kprobe. */
		if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
			int fd;

			load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
			load_attr.expected_attach_type = 0;
			fd = bpf_load_program_xattr(&load_attr, NULL, 0);
			if (fd >= 0) {
				close(fd);
				ret = -LIBBPF_ERRNO__PROGTYPE;
				goto out;
			}
		}

		/* otherwise blame the kernel version as the best guess */
		if (log_buf)
			ret = -LIBBPF_ERRNO__KVER;
	}

out:
	free(log_buf);
	return ret;
}
1601 
/*
 * Load @prog into the kernel, honoring its preprocessor (if any).
 * Without a preprocessor a single instance is loaded from prog->insns.
 * With one, each instance's instructions are produced by the preprocessor
 * callback and loaded separately; an instance may be skipped by returning
 * no instructions.  On return (success or failure) prog->insns is freed -
 * a program can therefore only be loaded once.
 */
int
bpf_program__load(struct bpf_program *prog,
		  char *license, __u32 kern_version)
{
	int err = 0, fd, i;

	/* lazily set up the single-instance fd array */
	if (prog->instances.nr < 0 || !prog->instances.fds) {
		if (prog->preprocessor) {
			/* a preprocessor requires pre-sized instances */
			pr_warning("Internal error: can't load program '%s'\n",
				   prog->section_name);
			return -LIBBPF_ERRNO__INTERNAL;
		}

		prog->instances.fds = malloc(sizeof(int));
		if (!prog->instances.fds) {
			pr_warning("Not enough memory for BPF fds\n");
			return -ENOMEM;
		}
		prog->instances.nr = 1;
		prog->instances.fds[0] = -1;
	}

	if (!prog->preprocessor) {
		/* inconsistent but non-fatal: only instance 0 is loaded */
		if (prog->instances.nr != 1) {
			pr_warning("Program '%s' is inconsistent: nr(%d) != 1\n",
				   prog->section_name, prog->instances.nr);
		}
		err = load_program(prog, prog->insns, prog->insns_cnt,
				   license, kern_version, &fd);
		if (!err)
			prog->instances.fds[0] = fd;
		goto out;
	}

	for (i = 0; i < prog->instances.nr; i++) {
		struct bpf_prog_prep_result result;
		bpf_program_prep_t preprocessor = prog->preprocessor;

		bzero(&result, sizeof(result));
		err = preprocessor(prog, i, prog->insns,
				   prog->insns_cnt, &result);
		if (err) {
			pr_warning("Preprocessing the %dth instance of program '%s' failed\n",
				   i, prog->section_name);
			goto out;
		}

		/* empty result means "skip this instance" by contract */
		if (!result.new_insn_ptr || !result.new_insn_cnt) {
			pr_debug("Skip loading the %dth instance of program '%s'\n",
				 i, prog->section_name);
			prog->instances.fds[i] = -1;
			if (result.pfd)
				*result.pfd = -1;
			continue;
		}

		err = load_program(prog, result.new_insn_ptr,
				   result.new_insn_cnt,
				   license, kern_version, &fd);

		if (err) {
			pr_warning("Loading the %dth instance of program '%s' failed\n",
					i, prog->section_name);
			goto out;
		}

		if (result.pfd)
			*result.pfd = fd;
		prog->instances.fds[i] = fd;
	}
out:
	if (err)
		pr_warning("failed to load program '%s'\n",
			   prog->section_name);
	/* instructions are consumed whether or not loading succeeded */
	zfree(&prog->insns);
	prog->insns_cnt = 0;
	return err;
}
1680 
1681 static bool bpf_program__is_function_storage(struct bpf_program *prog,
1682 					     struct bpf_object *obj)
1683 {
1684 	return prog->idx == obj->efile.text_shndx && obj->has_pseudo_calls;
1685 }
1686 
1687 static int
1688 bpf_object__load_progs(struct bpf_object *obj)
1689 {
1690 	size_t i;
1691 	int err;
1692 
1693 	for (i = 0; i < obj->nr_programs; i++) {
1694 		if (bpf_program__is_function_storage(&obj->programs[i], obj))
1695 			continue;
1696 		err = bpf_program__load(&obj->programs[i],
1697 					obj->license,
1698 					obj->kern_version);
1699 		if (err)
1700 			return err;
1701 	}
1702 	return 0;
1703 }
1704 
/*
 * Whether the kernel requires a kern_version attribute to load programs
 * of @type.  Kprobes (and, conservatively, any type unknown to this
 * switch) need it; all explicitly listed types do not.
 */
static bool bpf_prog_type__needs_kver(enum bpf_prog_type type)
{
	switch (type) {
	case BPF_PROG_TYPE_SOCKET_FILTER:
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
	case BPF_PROG_TYPE_XDP:
	case BPF_PROG_TYPE_CGROUP_SKB:
	case BPF_PROG_TYPE_CGROUP_SOCK:
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
	case BPF_PROG_TYPE_LWT_SEG6LOCAL:
	case BPF_PROG_TYPE_SOCK_OPS:
	case BPF_PROG_TYPE_SK_SKB:
	case BPF_PROG_TYPE_CGROUP_DEVICE:
	case BPF_PROG_TYPE_SK_MSG:
	case BPF_PROG_TYPE_CGROUP_SOCK_ADDR:
	case BPF_PROG_TYPE_LIRC_MODE2:
	case BPF_PROG_TYPE_SK_REUSEPORT:
	case BPF_PROG_TYPE_FLOW_DISSECTOR:
	case BPF_PROG_TYPE_UNSPEC:
	case BPF_PROG_TYPE_TRACEPOINT:
	case BPF_PROG_TYPE_RAW_TRACEPOINT:
	case BPF_PROG_TYPE_PERF_EVENT:
		return false;
	case BPF_PROG_TYPE_KPROBE:
	default:
		return true;
	}
}
1736 
1737 static int bpf_object__validate(struct bpf_object *obj, bool needs_kver)
1738 {
1739 	if (needs_kver && obj->kern_version == 0) {
1740 		pr_warning("%s doesn't provide kernel version\n",
1741 			   obj->path);
1742 		return -LIBBPF_ERRNO__KVERSION;
1743 	}
1744 	return 0;
1745 }
1746 
/*
 * Core open path shared by all bpf_object__open* entry points: parse the
 * ELF (from @path or an in-memory buffer), collect sections and
 * relocations, then release the ELF handle.  Returns a live object or an
 * ERR_PTR-encoded error.
 */
static struct bpf_object *
__bpf_object__open(const char *path, void *obj_buf, size_t obj_buf_sz,
		   bool needs_kver, int flags)
{
	struct bpf_object *obj;
	int err;

	if (elf_version(EV_CURRENT) == EV_NONE) {
		pr_warning("failed to init libelf for %s\n", path);
		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
	}

	obj = bpf_object__new(path, obj_buf, obj_buf_sz);
	if (IS_ERR(obj))
		return obj;

	/* NOTE(review): CHECK_ERR presumably stores the failure in err and
	 * jumps to the out label - confirm against the macro definition.
	 */
	CHECK_ERR(bpf_object__elf_init(obj), err, out);
	CHECK_ERR(bpf_object__check_endianness(obj), err, out);
	CHECK_ERR(bpf_object__elf_collect(obj, flags), err, out);
	CHECK_ERR(bpf_object__collect_reloc(obj), err, out);
	CHECK_ERR(bpf_object__validate(obj, needs_kver), err, out);

	/* the ELF is fully parsed; drop the libelf handle early */
	bpf_object__elf_finish(obj);
	return obj;
out:
	bpf_object__close(obj);
	return ERR_PTR(err);
}
1775 
1776 struct bpf_object *__bpf_object__open_xattr(struct bpf_object_open_attr *attr,
1777 					    int flags)
1778 {
1779 	/* param validation */
1780 	if (!attr->file)
1781 		return NULL;
1782 
1783 	pr_debug("loading %s\n", attr->file);
1784 
1785 	return __bpf_object__open(attr->file, NULL, 0,
1786 				  bpf_prog_type__needs_kver(attr->prog_type),
1787 				  flags);
1788 }
1789 
/* Public wrapper around __bpf_object__open_xattr() with no extra flags. */
struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
{
	return __bpf_object__open_xattr(attr, 0);
}
1794 
1795 struct bpf_object *bpf_object__open(const char *path)
1796 {
1797 	struct bpf_object_open_attr attr = {
1798 		.file		= path,
1799 		.prog_type	= BPF_PROG_TYPE_UNSPEC,
1800 	};
1801 
1802 	return bpf_object__open_xattr(&attr);
1803 }
1804 
/*
 * Open a BPF object from an in-memory ELF image.  @name labels the object
 * (a "<ptr>-<size>" name is synthesized when NULL).  Returns NULL on bad
 * parameters, otherwise __bpf_object__open()'s result.
 */
struct bpf_object *bpf_object__open_buffer(void *obj_buf,
					   size_t obj_buf_sz,
					   const char *name)
{
	char tmp_name[64];

	/* param validation */
	if (!obj_buf || obj_buf_sz <= 0)
		return NULL;

	if (!name) {
		snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
			 (unsigned long)obj_buf,
			 (unsigned long)obj_buf_sz);
		tmp_name[sizeof(tmp_name) - 1] = '\0';
		name = tmp_name;
	}
	pr_debug("loading object '%s' from buffer\n",
		 name);

	/* NOTE(review): the final argument is the int `flags` parameter of
	 * __bpf_object__open(), yet a boolean `true` (i.e. 1) is passed -
	 * looks like it relies on flag bit 1's meaning; confirm intent.
	 */
	return __bpf_object__open(name, obj_buf, obj_buf_sz, true, true);
}
1827 
1828 int bpf_object__unload(struct bpf_object *obj)
1829 {
1830 	size_t i;
1831 
1832 	if (!obj)
1833 		return -EINVAL;
1834 
1835 	for (i = 0; i < obj->nr_maps; i++)
1836 		zclose(obj->maps[i].fd);
1837 
1838 	for (i = 0; i < obj->nr_programs; i++)
1839 		bpf_program__unload(&obj->programs[i]);
1840 
1841 	return 0;
1842 }
1843 
/*
 * Load @obj into the kernel: probe capabilities, create maps, apply
 * relocations, then load all programs.  A given object may only be
 * loaded once; any failure fully unloads whatever was created.
 */
int bpf_object__load(struct bpf_object *obj)
{
	int err;

	if (!obj)
		return -EINVAL;

	if (obj->loaded) {
		pr_warning("object should not be loaded twice\n");
		return -EINVAL;
	}

	/* set before loading so a failed load still counts as "loaded" */
	obj->loaded = true;

	CHECK_ERR(bpf_object__probe_caps(obj), err, out);
	CHECK_ERR(bpf_object__create_maps(obj), err, out);
	CHECK_ERR(bpf_object__relocate(obj), err, out);
	CHECK_ERR(bpf_object__load_progs(obj), err, out);

	return 0;
out:
	bpf_object__unload(obj);
	pr_warning("failed to load object '%s'\n", obj->path);
	return err;
}
1869 
1870 static int check_path(const char *path)
1871 {
1872 	char *cp, errmsg[STRERR_BUFSIZE];
1873 	struct statfs st_fs;
1874 	char *dname, *dir;
1875 	int err = 0;
1876 
1877 	if (path == NULL)
1878 		return -EINVAL;
1879 
1880 	dname = strdup(path);
1881 	if (dname == NULL)
1882 		return -ENOMEM;
1883 
1884 	dir = dirname(dname);
1885 	if (statfs(dir, &st_fs)) {
1886 		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1887 		pr_warning("failed to statfs %s: %s\n", dir, cp);
1888 		err = -errno;
1889 	}
1890 	free(dname);
1891 
1892 	if (!err && st_fs.f_type != BPF_FS_MAGIC) {
1893 		pr_warning("specified path %s is not on BPF FS\n", path);
1894 		err = -EINVAL;
1895 	}
1896 
1897 	return err;
1898 }
1899 
1900 int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
1901 			      int instance)
1902 {
1903 	char *cp, errmsg[STRERR_BUFSIZE];
1904 	int err;
1905 
1906 	err = check_path(path);
1907 	if (err)
1908 		return err;
1909 
1910 	if (prog == NULL) {
1911 		pr_warning("invalid program pointer\n");
1912 		return -EINVAL;
1913 	}
1914 
1915 	if (instance < 0 || instance >= prog->instances.nr) {
1916 		pr_warning("invalid prog instance %d of prog %s (max %d)\n",
1917 			   instance, prog->section_name, prog->instances.nr);
1918 		return -EINVAL;
1919 	}
1920 
1921 	if (bpf_obj_pin(prog->instances.fds[instance], path)) {
1922 		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
1923 		pr_warning("failed to pin program: %s\n", cp);
1924 		return -errno;
1925 	}
1926 	pr_debug("pinned program '%s'\n", path);
1927 
1928 	return 0;
1929 }
1930 
1931 int bpf_program__unpin_instance(struct bpf_program *prog, const char *path,
1932 				int instance)
1933 {
1934 	int err;
1935 
1936 	err = check_path(path);
1937 	if (err)
1938 		return err;
1939 
1940 	if (prog == NULL) {
1941 		pr_warning("invalid program pointer\n");
1942 		return -EINVAL;
1943 	}
1944 
1945 	if (instance < 0 || instance >= prog->instances.nr) {
1946 		pr_warning("invalid prog instance %d of prog %s (max %d)\n",
1947 			   instance, prog->section_name, prog->instances.nr);
1948 		return -EINVAL;
1949 	}
1950 
1951 	err = unlink(path);
1952 	if (err != 0)
1953 		return -errno;
1954 	pr_debug("unpinned program '%s'\n", path);
1955 
1956 	return 0;
1957 }
1958 
1959 static int make_dir(const char *path)
1960 {
1961 	char *cp, errmsg[STRERR_BUFSIZE];
1962 	int err = 0;
1963 
1964 	if (mkdir(path, 0700) && errno != EEXIST)
1965 		err = -errno;
1966 
1967 	if (err) {
1968 		cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
1969 		pr_warning("failed to mkdir %s: %s\n", path, cp);
1970 	}
1971 	return err;
1972 }
1973 
/*
 * Pin all instances of @prog under @path.  A single-instance program is
 * pinned directly at @path; a multi-instance program gets a directory at
 * @path with one entry per instance index.  On failure, every instance
 * pinned so far is unpinned and the directory removed.
 */
int bpf_program__pin(struct bpf_program *prog, const char *path)
{
	int i, err;

	err = check_path(path);
	if (err)
		return err;

	if (prog == NULL) {
		pr_warning("invalid program pointer\n");
		return -EINVAL;
	}

	if (prog->instances.nr <= 0) {
		pr_warning("no instances of prog %s to pin\n",
			   prog->section_name);
		return -EINVAL;
	}

	if (prog->instances.nr == 1) {
		/* don't create subdirs when pinning single instance */
		return bpf_program__pin_instance(prog, path, 0);
	}

	err = make_dir(path);
	if (err)
		return err;

	for (i = 0; i < prog->instances.nr; i++) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
		if (len < 0) {
			err = -EINVAL;
			goto err_unpin;
		} else if (len >= PATH_MAX) {
			err = -ENAMETOOLONG;
			goto err_unpin;
		}

		err = bpf_program__pin_instance(prog, buf, i);
		if (err)
			goto err_unpin;
	}

	return 0;

err_unpin:
	/* roll back: unpin instances [0, i) in reverse order */
	for (i = i - 1; i >= 0; i--) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
		if (len < 0)
			continue;
		else if (len >= PATH_MAX)
			continue;

		bpf_program__unpin_instance(prog, buf, i);
	}

	rmdir(path);

	return err;
}
2040 
2041 int bpf_program__unpin(struct bpf_program *prog, const char *path)
2042 {
2043 	int i, err;
2044 
2045 	err = check_path(path);
2046 	if (err)
2047 		return err;
2048 
2049 	if (prog == NULL) {
2050 		pr_warning("invalid program pointer\n");
2051 		return -EINVAL;
2052 	}
2053 
2054 	if (prog->instances.nr <= 0) {
2055 		pr_warning("no instances of prog %s to pin\n",
2056 			   prog->section_name);
2057 		return -EINVAL;
2058 	}
2059 
2060 	if (prog->instances.nr == 1) {
2061 		/* don't create subdirs when pinning single instance */
2062 		return bpf_program__unpin_instance(prog, path, 0);
2063 	}
2064 
2065 	for (i = 0; i < prog->instances.nr; i++) {
2066 		char buf[PATH_MAX];
2067 		int len;
2068 
2069 		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
2070 		if (len < 0)
2071 			return -EINVAL;
2072 		else if (len >= PATH_MAX)
2073 			return -ENAMETOOLONG;
2074 
2075 		err = bpf_program__unpin_instance(prog, buf, i);
2076 		if (err)
2077 			return err;
2078 	}
2079 
2080 	err = rmdir(path);
2081 	if (err)
2082 		return -errno;
2083 
2084 	return 0;
2085 }
2086 
2087 int bpf_map__pin(struct bpf_map *map, const char *path)
2088 {
2089 	char *cp, errmsg[STRERR_BUFSIZE];
2090 	int err;
2091 
2092 	err = check_path(path);
2093 	if (err)
2094 		return err;
2095 
2096 	if (map == NULL) {
2097 		pr_warning("invalid map pointer\n");
2098 		return -EINVAL;
2099 	}
2100 
2101 	if (bpf_obj_pin(map->fd, path)) {
2102 		cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
2103 		pr_warning("failed to pin map: %s\n", cp);
2104 		return -errno;
2105 	}
2106 
2107 	pr_debug("pinned map '%s'\n", path);
2108 
2109 	return 0;
2110 }
2111 
/*
 * Remove @map's pin at @path.
 * Returns 0 on success, negative error code otherwise.
 */
int bpf_map__unpin(struct bpf_map *map, const char *path)
{
	int err;

	err = check_path(path);
	if (err)
		return err;

	if (map == NULL) {
		pr_warning("invalid map pointer\n");
		return -EINVAL;
	}

	if (unlink(path) != 0)
		return -errno;

	pr_debug("unpinned map '%s'\n", path);
	return 0;
}
2132 
/*
 * Pin every map of the (already loaded) @obj under directory @path, one
 * entry per map name.  On failure, maps pinned so far are unpinned by
 * walking backwards from the failing map.
 */
int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
{
	struct bpf_map *map;
	int err;

	if (!obj)
		return -ENOENT;

	if (!obj->loaded) {
		pr_warning("object not yet loaded; load it first\n");
		return -ENOENT;
	}

	err = make_dir(path);
	if (err)
		return err;

	bpf_map__for_each(map, obj) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       bpf_map__name(map));
		if (len < 0) {
			err = -EINVAL;
			goto err_unpin_maps;
		} else if (len >= PATH_MAX) {
			err = -ENAMETOOLONG;
			goto err_unpin_maps;
		}

		err = bpf_map__pin(map, buf);
		if (err)
			goto err_unpin_maps;
	}

	return 0;

err_unpin_maps:
	/*
	 * Roll back in reverse order; bpf_map__prev() of the first map
	 * returns NULL, terminating the loop before the failing map.
	 */
	while ((map = bpf_map__prev(map, obj))) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       bpf_map__name(map));
		if (len < 0)
			continue;
		else if (len >= PATH_MAX)
			continue;

		bpf_map__unpin(map, buf);
	}

	return err;
}
2188 
2189 int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
2190 {
2191 	struct bpf_map *map;
2192 	int err;
2193 
2194 	if (!obj)
2195 		return -ENOENT;
2196 
2197 	bpf_map__for_each(map, obj) {
2198 		char buf[PATH_MAX];
2199 		int len;
2200 
2201 		len = snprintf(buf, PATH_MAX, "%s/%s", path,
2202 			       bpf_map__name(map));
2203 		if (len < 0)
2204 			return -EINVAL;
2205 		else if (len >= PATH_MAX)
2206 			return -ENAMETOOLONG;
2207 
2208 		err = bpf_map__unpin(map, buf);
2209 		if (err)
2210 			return err;
2211 	}
2212 
2213 	return 0;
2214 }
2215 
/*
 * Pin every program of the (already loaded) @obj under directory @path,
 * one entry per program pin_name.  On failure, programs pinned so far are
 * unpinned by walking backwards from the failing one.
 */
int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
{
	struct bpf_program *prog;
	int err;

	if (!obj)
		return -ENOENT;

	if (!obj->loaded) {
		pr_warning("object not yet loaded; load it first\n");
		return -ENOENT;
	}

	err = make_dir(path);
	if (err)
		return err;

	bpf_object__for_each_program(prog, obj) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       prog->pin_name);
		if (len < 0) {
			err = -EINVAL;
			goto err_unpin_programs;
		} else if (len >= PATH_MAX) {
			err = -ENAMETOOLONG;
			goto err_unpin_programs;
		}

		err = bpf_program__pin(prog, buf);
		if (err)
			goto err_unpin_programs;
	}

	return 0;

err_unpin_programs:
	/*
	 * Roll back in reverse order; bpf_program__prev() of the first
	 * program returns NULL, terminating before the failing one.
	 */
	while ((prog = bpf_program__prev(prog, obj))) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       prog->pin_name);
		if (len < 0)
			continue;
		else if (len >= PATH_MAX)
			continue;

		bpf_program__unpin(prog, buf);
	}

	return err;
}
2271 
2272 int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
2273 {
2274 	struct bpf_program *prog;
2275 	int err;
2276 
2277 	if (!obj)
2278 		return -ENOENT;
2279 
2280 	bpf_object__for_each_program(prog, obj) {
2281 		char buf[PATH_MAX];
2282 		int len;
2283 
2284 		len = snprintf(buf, PATH_MAX, "%s/%s", path,
2285 			       prog->pin_name);
2286 		if (len < 0)
2287 			return -EINVAL;
2288 		else if (len >= PATH_MAX)
2289 			return -ENAMETOOLONG;
2290 
2291 		err = bpf_program__unpin(prog, buf);
2292 		if (err)
2293 			return err;
2294 	}
2295 
2296 	return 0;
2297 }
2298 
/*
 * Pin all maps and programs of @obj under @path.  If pinning the
 * programs fails after the maps succeeded, the maps are unpinned again
 * so the operation is all-or-nothing.
 */
int bpf_object__pin(struct bpf_object *obj, const char *path)
{
	int err;

	err = bpf_object__pin_maps(obj, path);
	if (err)
		return err;

	err = bpf_object__pin_programs(obj, path);
	if (!err)
		return 0;

	/* roll back the map pins on program-pin failure */
	bpf_object__unpin_maps(obj, path);
	return err;
}
2315 
/*
 * Destroy @obj: run the user's clear_priv callbacks, release ELF and
 * kernel resources, free BTF data, all maps and programs, unlink the
 * object from the global list and free it.  NULL is a no-op.
 */
void bpf_object__close(struct bpf_object *obj)
{
	size_t i;

	if (!obj)
		return;

	/* user callbacks run first, while the object is still intact */
	if (obj->clear_priv)
		obj->clear_priv(obj, obj->priv);

	bpf_object__elf_finish(obj);
	bpf_object__unload(obj);
	btf__free(obj->btf);
	btf_ext__free(obj->btf_ext);

	for (i = 0; i < obj->nr_maps; i++) {
		zfree(&obj->maps[i].name);
		if (obj->maps[i].clear_priv)
			obj->maps[i].clear_priv(&obj->maps[i],
						obj->maps[i].priv);
		obj->maps[i].priv = NULL;
		obj->maps[i].clear_priv = NULL;
	}
	zfree(&obj->maps);
	obj->nr_maps = 0;

	if (obj->programs && obj->nr_programs) {
		for (i = 0; i < obj->nr_programs; i++)
			bpf_program__exit(&obj->programs[i]);
	}
	zfree(&obj->programs);

	/* remove from the global bpf_objects_list before freeing */
	list_del(&obj->list);
	free(obj);
}
2351 
2352 struct bpf_object *
2353 bpf_object__next(struct bpf_object *prev)
2354 {
2355 	struct bpf_object *next;
2356 
2357 	if (!prev)
2358 		next = list_first_entry(&bpf_objects_list,
2359 					struct bpf_object,
2360 					list);
2361 	else
2362 		next = list_next_entry(prev, list);
2363 
2364 	/* Empty list is noticed here so don't need checking on entry. */
2365 	if (&next->list == &bpf_objects_list)
2366 		return NULL;
2367 
2368 	return next;
2369 }
2370 
2371 const char *bpf_object__name(struct bpf_object *obj)
2372 {
2373 	return obj ? obj->path : ERR_PTR(-EINVAL);
2374 }
2375 
2376 unsigned int bpf_object__kversion(struct bpf_object *obj)
2377 {
2378 	return obj ? obj->kern_version : 0;
2379 }
2380 
2381 int bpf_object__btf_fd(const struct bpf_object *obj)
2382 {
2383 	return obj->btf ? btf__fd(obj->btf) : -1;
2384 }
2385 
2386 int bpf_object__set_priv(struct bpf_object *obj, void *priv,
2387 			 bpf_object_clear_priv_t clear_priv)
2388 {
2389 	if (obj->priv && obj->clear_priv)
2390 		obj->clear_priv(obj, obj->priv);
2391 
2392 	obj->priv = priv;
2393 	obj->clear_priv = clear_priv;
2394 	return 0;
2395 }
2396 
2397 void *bpf_object__priv(struct bpf_object *obj)
2398 {
2399 	return obj ? obj->priv : ERR_PTR(-EINVAL);
2400 }
2401 
2402 static struct bpf_program *
2403 __bpf_program__iter(struct bpf_program *p, struct bpf_object *obj, bool forward)
2404 {
2405 	size_t nr_programs = obj->nr_programs;
2406 	ssize_t idx;
2407 
2408 	if (!nr_programs)
2409 		return NULL;
2410 
2411 	if (!p)
2412 		/* Iter from the beginning */
2413 		return forward ? &obj->programs[0] :
2414 			&obj->programs[nr_programs - 1];
2415 
2416 	if (p->obj != obj) {
2417 		pr_warning("error: program handler doesn't match object\n");
2418 		return NULL;
2419 	}
2420 
2421 	idx = (p - obj->programs) + (forward ? 1 : -1);
2422 	if (idx >= obj->nr_programs || idx < 0)
2423 		return NULL;
2424 	return &obj->programs[idx];
2425 }
2426 
2427 struct bpf_program *
2428 bpf_program__next(struct bpf_program *prev, struct bpf_object *obj)
2429 {
2430 	struct bpf_program *prog = prev;
2431 
2432 	do {
2433 		prog = __bpf_program__iter(prog, obj, true);
2434 	} while (prog && bpf_program__is_function_storage(prog, obj));
2435 
2436 	return prog;
2437 }
2438 
2439 struct bpf_program *
2440 bpf_program__prev(struct bpf_program *next, struct bpf_object *obj)
2441 {
2442 	struct bpf_program *prog = next;
2443 
2444 	do {
2445 		prog = __bpf_program__iter(prog, obj, false);
2446 	} while (prog && bpf_program__is_function_storage(prog, obj));
2447 
2448 	return prog;
2449 }
2450 
2451 int bpf_program__set_priv(struct bpf_program *prog, void *priv,
2452 			  bpf_program_clear_priv_t clear_priv)
2453 {
2454 	if (prog->priv && prog->clear_priv)
2455 		prog->clear_priv(prog, prog->priv);
2456 
2457 	prog->priv = priv;
2458 	prog->clear_priv = clear_priv;
2459 	return 0;
2460 }
2461 
2462 void *bpf_program__priv(struct bpf_program *prog)
2463 {
2464 	return prog ? prog->priv : ERR_PTR(-EINVAL);
2465 }
2466 
/* Record the network-device ifindex used for offloaded program load.
 * NOTE(review): @prog is dereferenced without a NULL check.
 */
void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
{
	prog->prog_ifindex = ifindex;
}
2471 
2472 const char *bpf_program__title(struct bpf_program *prog, bool needs_copy)
2473 {
2474 	const char *title;
2475 
2476 	title = prog->section_name;
2477 	if (needs_copy) {
2478 		title = strdup(title);
2479 		if (!title) {
2480 			pr_warning("failed to strdup program title\n");
2481 			return ERR_PTR(-ENOMEM);
2482 		}
2483 	}
2484 
2485 	return title;
2486 }
2487 
/* Return the fd of the program's first (0th) instance, or a negative
 * error if the program is NULL or the instance was never loaded.
 */
int bpf_program__fd(struct bpf_program *prog)
{
	return bpf_program__nth_fd(prog, 0);
}
2492 
2493 int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
2494 			  bpf_program_prep_t prep)
2495 {
2496 	int *instances_fds;
2497 
2498 	if (nr_instances <= 0 || !prep)
2499 		return -EINVAL;
2500 
2501 	if (prog->instances.nr > 0 || prog->instances.fds) {
2502 		pr_warning("Can't set pre-processor after loading\n");
2503 		return -EINVAL;
2504 	}
2505 
2506 	instances_fds = malloc(sizeof(int) * nr_instances);
2507 	if (!instances_fds) {
2508 		pr_warning("alloc memory failed for fds\n");
2509 		return -ENOMEM;
2510 	}
2511 
2512 	/* fill all fd with -1 */
2513 	memset(instances_fds, -1, sizeof(int) * nr_instances);
2514 
2515 	prog->instances.nr = nr_instances;
2516 	prog->instances.fds = instances_fds;
2517 	prog->preprocessor = prep;
2518 	return 0;
2519 }
2520 
2521 int bpf_program__nth_fd(struct bpf_program *prog, int n)
2522 {
2523 	int fd;
2524 
2525 	if (!prog)
2526 		return -EINVAL;
2527 
2528 	if (n >= prog->instances.nr || n < 0) {
2529 		pr_warning("Can't get the %dth fd from program %s: only %d instances\n",
2530 			   n, prog->section_name, prog->instances.nr);
2531 		return -EINVAL;
2532 	}
2533 
2534 	fd = prog->instances.fds[n];
2535 	if (fd < 0) {
2536 		pr_warning("%dth instance of program '%s' is invalid\n",
2537 			   n, prog->section_name);
2538 		return -ENOENT;
2539 	}
2540 
2541 	return fd;
2542 }
2543 
/* Set the BPF program type used at load time.
 * NOTE(review): @prog is dereferenced without a NULL check.
 */
void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
{
	prog->type = type;
}
2548 
2549 static bool bpf_program__is_type(struct bpf_program *prog,
2550 				 enum bpf_prog_type type)
2551 {
2552 	return prog ? (prog->type == type) : false;
2553 }
2554 
/*
 * BPF_PROG_TYPE_FNS(NAME, TYPE) expands to a pair of public accessors:
 *   bpf_program__set_<NAME>() - set the program's type to TYPE
 *                               (-EINVAL for a NULL program)
 *   bpf_program__is_<NAME>()  - test whether the program's type is TYPE
 */
#define BPF_PROG_TYPE_FNS(NAME, TYPE)			\
int bpf_program__set_##NAME(struct bpf_program *prog)	\
{							\
	if (!prog)					\
		return -EINVAL;				\
	bpf_program__set_type(prog, TYPE);		\
	return 0;					\
}							\
							\
bool bpf_program__is_##NAME(struct bpf_program *prog)	\
{							\
	return bpf_program__is_type(prog, TYPE);	\
}							\

/* One set/is pair per well-known program type. */
BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
2577 
/* Set the expected attach type passed to the kernel at load time
 * (required for some program types, e.g. cgroup sock_addr programs).
 * NOTE(review): @prog is dereferenced without a NULL check.
 */
void bpf_program__set_expected_attach_type(struct bpf_program *prog,
					   enum bpf_attach_type type)
{
	prog->expected_attach_type = type;
}
2583 
/* Helpers to build section_names[] entries below; sizeof(string) - 1
 * precomputes the prefix length used by the strncmp() lookups.
 */
#define BPF_PROG_SEC_IMPL(string, ptype, eatype, is_attachable, atype) \
	{ string, sizeof(string) - 1, ptype, eatype, is_attachable, atype }

/* Programs that can NOT be attached. */
#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0)

/* Programs that can be attached. */
#define BPF_APROG_SEC(string, ptype, atype) \
	BPF_PROG_SEC_IMPL(string, ptype, 0, 1, atype)

/* Programs that must specify expected attach type at load time. */
#define BPF_EAPROG_SEC(string, ptype, eatype) \
	BPF_PROG_SEC_IMPL(string, ptype, eatype, 1, eatype)

/* Programs that can be attached but attach type can't be identified by section
 * name. Kept for backward compatibility.
 */
#define BPF_APROG_COMPAT(string, ptype) BPF_PROG_SEC(string, ptype)
2602 
/* Table mapping ELF section-name prefixes to BPF program/attach types;
 * consulted by libbpf_prog_type_by_name() and
 * libbpf_attach_type_by_name(). Matching is by prefix, so more
 * specific entries (e.g. "sk_skb/stream_parser") must precede their
 * generic fallbacks (e.g. "sk_skb").
 */
static const struct {
	const char *sec;			/* section-name prefix */
	size_t len;				/* prefix length */
	enum bpf_prog_type prog_type;
	enum bpf_attach_type expected_attach_type;
	int is_attachable;			/* attach type derivable from section */
	enum bpf_attach_type attach_type;
} section_names[] = {
	BPF_PROG_SEC("socket",			BPF_PROG_TYPE_SOCKET_FILTER),
	BPF_PROG_SEC("kprobe/",			BPF_PROG_TYPE_KPROBE),
	BPF_PROG_SEC("kretprobe/",		BPF_PROG_TYPE_KPROBE),
	BPF_PROG_SEC("classifier",		BPF_PROG_TYPE_SCHED_CLS),
	BPF_PROG_SEC("action",			BPF_PROG_TYPE_SCHED_ACT),
	BPF_PROG_SEC("tracepoint/",		BPF_PROG_TYPE_TRACEPOINT),
	BPF_PROG_SEC("raw_tracepoint/",		BPF_PROG_TYPE_RAW_TRACEPOINT),
	BPF_PROG_SEC("xdp",			BPF_PROG_TYPE_XDP),
	BPF_PROG_SEC("perf_event",		BPF_PROG_TYPE_PERF_EVENT),
	BPF_PROG_SEC("lwt_in",			BPF_PROG_TYPE_LWT_IN),
	BPF_PROG_SEC("lwt_out",			BPF_PROG_TYPE_LWT_OUT),
	BPF_PROG_SEC("lwt_xmit",		BPF_PROG_TYPE_LWT_XMIT),
	BPF_PROG_SEC("lwt_seg6local",		BPF_PROG_TYPE_LWT_SEG6LOCAL),
	BPF_APROG_SEC("cgroup_skb/ingress",	BPF_PROG_TYPE_CGROUP_SKB,
						BPF_CGROUP_INET_INGRESS),
	BPF_APROG_SEC("cgroup_skb/egress",	BPF_PROG_TYPE_CGROUP_SKB,
						BPF_CGROUP_INET_EGRESS),
	BPF_APROG_COMPAT("cgroup/skb",		BPF_PROG_TYPE_CGROUP_SKB),
	BPF_APROG_SEC("cgroup/sock",		BPF_PROG_TYPE_CGROUP_SOCK,
						BPF_CGROUP_INET_SOCK_CREATE),
	BPF_EAPROG_SEC("cgroup/post_bind4",	BPF_PROG_TYPE_CGROUP_SOCK,
						BPF_CGROUP_INET4_POST_BIND),
	BPF_EAPROG_SEC("cgroup/post_bind6",	BPF_PROG_TYPE_CGROUP_SOCK,
						BPF_CGROUP_INET6_POST_BIND),
	BPF_APROG_SEC("cgroup/dev",		BPF_PROG_TYPE_CGROUP_DEVICE,
						BPF_CGROUP_DEVICE),
	BPF_APROG_SEC("sockops",		BPF_PROG_TYPE_SOCK_OPS,
						BPF_CGROUP_SOCK_OPS),
	BPF_APROG_SEC("sk_skb/stream_parser",	BPF_PROG_TYPE_SK_SKB,
						BPF_SK_SKB_STREAM_PARSER),
	BPF_APROG_SEC("sk_skb/stream_verdict",	BPF_PROG_TYPE_SK_SKB,
						BPF_SK_SKB_STREAM_VERDICT),
	BPF_APROG_COMPAT("sk_skb",		BPF_PROG_TYPE_SK_SKB),
	BPF_APROG_SEC("sk_msg",			BPF_PROG_TYPE_SK_MSG,
						BPF_SK_MSG_VERDICT),
	BPF_APROG_SEC("lirc_mode2",		BPF_PROG_TYPE_LIRC_MODE2,
						BPF_LIRC_MODE2),
	BPF_APROG_SEC("flow_dissector",		BPF_PROG_TYPE_FLOW_DISSECTOR,
						BPF_FLOW_DISSECTOR),
	BPF_EAPROG_SEC("cgroup/bind4",		BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_INET4_BIND),
	BPF_EAPROG_SEC("cgroup/bind6",		BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_INET6_BIND),
	BPF_EAPROG_SEC("cgroup/connect4",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_INET4_CONNECT),
	BPF_EAPROG_SEC("cgroup/connect6",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_INET6_CONNECT),
	BPF_EAPROG_SEC("cgroup/sendmsg4",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_UDP4_SENDMSG),
	BPF_EAPROG_SEC("cgroup/sendmsg6",	BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
						BPF_CGROUP_UDP6_SENDMSG),
};

/* The helper macros are only meaningful for the table above. */
#undef BPF_PROG_SEC_IMPL
#undef BPF_PROG_SEC
#undef BPF_APROG_SEC
#undef BPF_EAPROG_SEC
#undef BPF_APROG_COMPAT
2669 
2670 int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
2671 			     enum bpf_attach_type *expected_attach_type)
2672 {
2673 	int i;
2674 
2675 	if (!name)
2676 		return -EINVAL;
2677 
2678 	for (i = 0; i < ARRAY_SIZE(section_names); i++) {
2679 		if (strncmp(name, section_names[i].sec, section_names[i].len))
2680 			continue;
2681 		*prog_type = section_names[i].prog_type;
2682 		*expected_attach_type = section_names[i].expected_attach_type;
2683 		return 0;
2684 	}
2685 	return -EINVAL;
2686 }
2687 
2688 int libbpf_attach_type_by_name(const char *name,
2689 			       enum bpf_attach_type *attach_type)
2690 {
2691 	int i;
2692 
2693 	if (!name)
2694 		return -EINVAL;
2695 
2696 	for (i = 0; i < ARRAY_SIZE(section_names); i++) {
2697 		if (strncmp(name, section_names[i].sec, section_names[i].len))
2698 			continue;
2699 		if (!section_names[i].is_attachable)
2700 			return -EINVAL;
2701 		*attach_type = section_names[i].attach_type;
2702 		return 0;
2703 	}
2704 	return -EINVAL;
2705 }
2706 
/* Guess the program's type and expected attach type from its ELF
 * section name; thin wrapper over libbpf_prog_type_by_name().
 */
static int
bpf_program__identify_section(struct bpf_program *prog,
			      enum bpf_prog_type *prog_type,
			      enum bpf_attach_type *expected_attach_type)
{
	return libbpf_prog_type_by_name(prog->section_name, prog_type,
					expected_attach_type);
}
2715 
2716 int bpf_map__fd(struct bpf_map *map)
2717 {
2718 	return map ? map->fd : -EINVAL;
2719 }
2720 
2721 const struct bpf_map_def *bpf_map__def(struct bpf_map *map)
2722 {
2723 	return map ? &map->def : ERR_PTR(-EINVAL);
2724 }
2725 
2726 const char *bpf_map__name(struct bpf_map *map)
2727 {
2728 	return map ? map->name : NULL;
2729 }
2730 
2731 __u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
2732 {
2733 	return map ? map->btf_key_type_id : 0;
2734 }
2735 
2736 __u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
2737 {
2738 	return map ? map->btf_value_type_id : 0;
2739 }
2740 
2741 int bpf_map__set_priv(struct bpf_map *map, void *priv,
2742 		     bpf_map_clear_priv_t clear_priv)
2743 {
2744 	if (!map)
2745 		return -EINVAL;
2746 
2747 	if (map->priv) {
2748 		if (map->clear_priv)
2749 			map->clear_priv(map, map->priv);
2750 	}
2751 
2752 	map->priv = priv;
2753 	map->clear_priv = clear_priv;
2754 	return 0;
2755 }
2756 
2757 void *bpf_map__priv(struct bpf_map *map)
2758 {
2759 	return map ? map->priv : ERR_PTR(-EINVAL);
2760 }
2761 
/* A perf-event array is the one map type that is never bound to a
 * specific device, so it keeps working regardless of offload ifindex.
 * NOTE(review): @map is dereferenced without a NULL check.
 */
bool bpf_map__is_offload_neutral(struct bpf_map *map)
{
	return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}
2766 
/* Record the network-device ifindex used for offloaded map creation.
 * NOTE(review): @map is dereferenced without a NULL check.
 */
void bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
{
	map->map_ifindex = ifindex;
}
2771 
/* Set the fd of the inner map used as the value template for a
 * map-in-map (array-of-maps / hash-of-maps). May only be set once,
 * and only on a map-in-map type. Returns 0 or -EINVAL.
 */
int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
{
	if (!bpf_map_type__is_map_in_map(map->def.type)) {
		pr_warning("error: unsupported map type\n");
		return -EINVAL;
	}
	/* -1 is the "unset" sentinel initialized at map creation. */
	if (map->inner_map_fd != -1) {
		pr_warning("error: inner_map_fd already specified\n");
		return -EINVAL;
	}
	map->inner_map_fd = fd;
	return 0;
}
2785 
2786 static struct bpf_map *
2787 __bpf_map__iter(struct bpf_map *m, struct bpf_object *obj, int i)
2788 {
2789 	ssize_t idx;
2790 	struct bpf_map *s, *e;
2791 
2792 	if (!obj || !obj->maps)
2793 		return NULL;
2794 
2795 	s = obj->maps;
2796 	e = obj->maps + obj->nr_maps;
2797 
2798 	if ((m < s) || (m >= e)) {
2799 		pr_warning("error in %s: map handler doesn't belong to object\n",
2800 			   __func__);
2801 		return NULL;
2802 	}
2803 
2804 	idx = (m - obj->maps) + i;
2805 	if (idx >= obj->nr_maps || idx < 0)
2806 		return NULL;
2807 	return &obj->maps[idx];
2808 }
2809 
2810 struct bpf_map *
2811 bpf_map__next(struct bpf_map *prev, struct bpf_object *obj)
2812 {
2813 	if (prev == NULL)
2814 		return obj->maps;
2815 
2816 	return __bpf_map__iter(prev, obj, 1);
2817 }
2818 
2819 struct bpf_map *
2820 bpf_map__prev(struct bpf_map *next, struct bpf_object *obj)
2821 {
2822 	if (next == NULL) {
2823 		if (!obj->nr_maps)
2824 			return NULL;
2825 		return obj->maps + obj->nr_maps - 1;
2826 	}
2827 
2828 	return __bpf_map__iter(next, obj, -1);
2829 }
2830 
2831 struct bpf_map *
2832 bpf_object__find_map_by_name(struct bpf_object *obj, const char *name)
2833 {
2834 	struct bpf_map *pos;
2835 
2836 	bpf_map__for_each(pos, obj) {
2837 		if (pos->name && !strcmp(pos->name, name))
2838 			return pos;
2839 	}
2840 	return NULL;
2841 }
2842 
2843 struct bpf_map *
2844 bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
2845 {
2846 	int i;
2847 
2848 	for (i = 0; i < obj->nr_maps; i++) {
2849 		if (obj->maps[i].offset == offset)
2850 			return &obj->maps[i];
2851 	}
2852 	return ERR_PTR(-ENOENT);
2853 }
2854 
/* Translate a libbpf pointer-or-error return into an error code:
 * the negative errno for an ERR_PTR-encoded pointer, 0 otherwise.
 */
long libbpf_get_error(const void *ptr)
{
	return IS_ERR(ptr) ? PTR_ERR(ptr) : 0;
}
2861 
2862 int bpf_prog_load(const char *file, enum bpf_prog_type type,
2863 		  struct bpf_object **pobj, int *prog_fd)
2864 {
2865 	struct bpf_prog_load_attr attr;
2866 
2867 	memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
2868 	attr.file = file;
2869 	attr.prog_type = type;
2870 	attr.expected_attach_type = 0;
2871 
2872 	return bpf_prog_load_xattr(&attr, pobj, prog_fd);
2873 }
2874 
/* Open, configure, and load the BPF object described by @attr.
 *
 * For each program: uses attr->prog_type, or guesses the type (and
 * expected attach type) from the section name when unspecified. Maps
 * that are not offload-neutral inherit attr->ifindex. On success,
 * stores the object in *@pobj and the first program's fd in
 * *@prog_fd. On any failure after open, the object is closed before
 * returning; errors are reported as -EINVAL or -ENOENT rather than
 * the underlying error code.
 */
int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
			struct bpf_object **pobj, int *prog_fd)
{
	struct bpf_object_open_attr open_attr = {
		.file		= attr->file,
		.prog_type	= attr->prog_type,
	};
	struct bpf_program *prog, *first_prog = NULL;
	enum bpf_attach_type expected_attach_type;
	enum bpf_prog_type prog_type;
	struct bpf_object *obj;
	struct bpf_map *map;
	int err;

	if (!attr)
		return -EINVAL;
	if (!attr->file)
		return -EINVAL;

	obj = bpf_object__open_xattr(&open_attr);
	if (IS_ERR_OR_NULL(obj))
		return -ENOENT;

	bpf_object__for_each_program(prog, obj) {
		/*
		 * If type is not specified, try to guess it based on
		 * section name.
		 */
		prog_type = attr->prog_type;
		prog->prog_ifindex = attr->ifindex;
		expected_attach_type = attr->expected_attach_type;
		if (prog_type == BPF_PROG_TYPE_UNSPEC) {
			err = bpf_program__identify_section(prog, &prog_type,
							    &expected_attach_type);
			if (err < 0) {
				pr_warning("failed to guess program type based on section name %s\n",
					   prog->section_name);
				bpf_object__close(obj);
				return -EINVAL;
			}
		}

		bpf_program__set_type(prog, prog_type);
		bpf_program__set_expected_attach_type(prog,
						      expected_attach_type);

		/* Remember the first program; its fd is returned. */
		if (!first_prog)
			first_prog = prog;
	}

	/* Device-bound maps must carry the same ifindex as the programs;
	 * offload-neutral maps (perf event arrays) are left alone.
	 */
	bpf_map__for_each(map, obj) {
		if (!bpf_map__is_offload_neutral(map))
			map->map_ifindex = attr->ifindex;
	}

	if (!first_prog) {
		pr_warning("object file doesn't contain bpf program\n");
		bpf_object__close(obj);
		return -ENOENT;
	}

	err = bpf_object__load(obj);
	if (err) {
		bpf_object__close(obj);
		return -EINVAL;
	}

	*pobj = obj;
	*prog_fd = bpf_program__fd(first_prog);
	return 0;
}
2946 
/* Drain all pending records from a perf-event mmap ring buffer.
 *
 * @mmap_mem: start of the perf mmap region (header page + data pages)
 * @mmap_size: size of the data area; must be a power of two, since
 *             "data_tail & (mmap_size - 1)" is used to wrap offsets
 * @page_size: system page size (the data area begins one page in)
 * @copy_mem/@copy_size: caller-owned scratch buffer, grown on demand,
 *             used to linearize records that wrap around the buffer end
 * @fn: callback invoked for each record; iteration stops when it
 *      returns anything other than LIBBPF_PERF_EVENT_CONT
 * @private_data: opaque pointer passed through to @fn
 *
 * Returns the last callback result, or LIBBPF_PERF_EVENT_ERROR on
 * allocation failure. The consumer position (data_tail) is published
 * back to the kernel before returning.
 */
enum bpf_perf_event_ret
bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
			   void **copy_mem, size_t *copy_size,
			   bpf_perf_event_print_t fn, void *private_data)
{
	struct perf_event_mmap_page *header = mmap_mem;
	/* ring_buffer_read_head() pairs with the kernel's write barrier */
	__u64 data_head = ring_buffer_read_head(header);
	__u64 data_tail = header->data_tail;
	void *base = ((__u8 *)header) + page_size;
	int ret = LIBBPF_PERF_EVENT_CONT;
	struct perf_event_header *ehdr;
	size_t ehdr_size;

	while (data_head != data_tail) {
		ehdr = base + (data_tail & (mmap_size - 1));
		ehdr_size = ehdr->size;

		/* Record wraps past the end of the data area: copy the
		 * two pieces into the scratch buffer so @fn sees a
		 * contiguous record.
		 */
		if (((void *)ehdr) + ehdr_size > base + mmap_size) {
			void *copy_start = ehdr;
			size_t len_first = base + mmap_size - copy_start;
			size_t len_secnd = ehdr_size - len_first;

			if (*copy_size < ehdr_size) {
				free(*copy_mem);
				*copy_mem = malloc(ehdr_size);
				if (!*copy_mem) {
					*copy_size = 0;
					ret = LIBBPF_PERF_EVENT_ERROR;
					break;
				}
				*copy_size = ehdr_size;
			}

			memcpy(*copy_mem, copy_start, len_first);
			memcpy(*copy_mem + len_first, base, len_secnd);
			ehdr = *copy_mem;
		}

		ret = fn(ehdr, private_data);
		data_tail += ehdr_size;
		if (ret != LIBBPF_PERF_EVENT_CONT)
			break;
	}

	/* Publish the new consumer position so the kernel can reuse
	 * the space (release semantics via ring_buffer_write_tail).
	 */
	ring_buffer_write_tail(header, data_tail);
	return ret;
}
2994